|
 # New operator library with a custom namespace to allow fusion etc.
 lib = Library("cortex_m", "DEF")
 
-# Import these for the cadence function signatures.
-import executorch.backends.cortex_m.cortex_m_ops_lib # noqa: F401
-
 ###
 # add.Tensor
 ###
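For context on the registration pattern this file uses: the `Library("cortex_m", "DEF")` object owns the custom namespace, each op gets a schema via `lib.define(...)`, and a Python reference implementation is attached with `@impl(lib, ..., "CompositeExplicitAutograd")`. Below is a minimal self-contained sketch of that pattern, not part of this change; the op name `my_relu` is hypothetical and only for illustration.

import torch
from torch.library import Library, impl

# Stand-alone equivalent of the setup above; in the real file `lib` already exists.
lib = Library("cortex_m", "DEF")

# Declare the op schema in the cortex_m namespace (hypothetical op, for illustration).
lib.define("my_relu(Tensor self) -> Tensor")

@impl(lib, "my_relu", "CompositeExplicitAutograd")
def my_relu_impl(self: torch.Tensor) -> torch.Tensor:
    # Reference semantics; a backend kernel can be substituted later.
    return torch.relu(self)

# Once registered, the op is reachable through the dispatcher:
# torch.ops.cortex_m.my_relu(torch.randn(4))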
@@ -148,19 +145,3 @@ def dequantize_per_tensor_impl( |
     return exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default(
         input, scale, zero_point, quant_min, quant_max, dtype
     )
-
-lib.define(
-    "softmax(Tensor self, int dim, bool half_to_float) -> Tensor"
-)
-lib.define(
-    "softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)"
-)
-@impl(lib, "softmax", "CompositeExplicitAutograd")
-def softmax_impl(self: torch.Tensor, dim: int, half_to_float: bool) -> torch.Tensor:
-    # Call your custom edge op or fallback
-    # return exir_ops.edge.cortex_m.softmax(self, dim, half_to_float)
-    # ctx = get_kernel_ctx() # gets KernelRuntimeContext*
-    return {}
-@impl(lib, "softmax.out", "CompositeExplicitAutograd")
-def softmax_out_impl(self: torch.Tensor, dim: int, half_to_float: bool, out: torch.Tensor) -> torch.Tensor:
-    return exir_ops.edge.cortex_m.softmax_out(self, dim, half_to_float, out)
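A minimal usage sketch of the surviving registration, assuming `dequantize_per_tensor_impl` above is registered as `cortex_m::dequantize_per_tensor` (the decorator is outside this excerpt) and that the module defining it has been imported; values here are made up for illustration.

import torch

# Hypothetical eager call through the dispatcher; argument order follows the
# quantized_decomposed convention shown above: input, scale, zero_point,
# quant_min, quant_max, and the dtype of the quantized input.
q = torch.randint(-128, 128, (4,), dtype=torch.int8)
x = torch.ops.cortex_m.dequantize_per_tensor(q, 0.05, 0, -128, 127, torch.int8)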