
Commit a393191

Cadence ops: Get rid of linalg vector norm
Differential Revision: D84673741
Pull Request resolved: #15140
1 parent caa35f6 commit a393191

File tree

1 file changed (+0, -12 lines)

backends/cadence/aot/ops_registrations.py

Lines changed: 0 additions & 12 deletions
@@ -65,7 +65,6 @@ def _validate_ref_impl_exists() -> None:
6565
"cadence::dequantize_per_tensor_asym8u",
6666
"cadence::dequantize_per_tensor_asym32s",
6767
"cadence::dequantize_per_tensor_asym16u",
68-
"cadence::linalg_vector_norm",
6968
"cadence::quantized_conv2d_nchw", # We should only support per_tensor variant, should remove
7069
"cadence::quantize_per_tensor_asym32s",
7170
"cadence::quantized_relu", # We should only support per_tensor variant, should remove
@@ -447,7 +446,6 @@ def register_fake(
447446
"im2row.per_tensor(Tensor input, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, "
448447
"int in_zero_point, bool channel_last=False) -> (Tensor out)"
449448
)
450-
lib.define("linalg_vector_norm(Tensor X) -> (Tensor Y)")
451449
lib.define(
452450
"linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)"
453451
)
@@ -603,7 +601,6 @@ def register_fake(
603601
lib.define(
604602
"fully_connected.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)"
605603
)
606-
lib.define("linalg_vector_norm.out(Tensor X, *, Tensor(a!) out) -> Tensor(a!)")
607604
lib.define(
608605
"quantized_fully_connected.out(Tensor src, Tensor weight, Tensor bias, int src_zero_point, "
609606
"Tensor weight_zero_point, Tensor out_multiplier, Tensor out_shift, int out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!)"
@@ -2007,15 +2004,6 @@ def im2row_per_tensor_meta(
20072004
return input.new_empty(output_size, dtype=input.dtype)
20082005

20092006

2010-
# Define the abstract implementations of the operators as required
2011-
@register_fake("cadence::linalg_vector_norm")
2012-
def linalg_vector_norm_meta(
2013-
X: torch.Tensor,
2014-
) -> torch.Tensor:
2015-
# Output of norm is a scalar, so we return a [] tensor
2016-
return X.new_empty([], dtype=X.dtype)
2017-
2018-
20192007
@register_fake("cadence::linalg_svd")
20202008
def linalg_svd_meta(
20212009
A: torch.Tensor,
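
Note on the pattern being deleted: this file uses torch.library's custom-op registration, where lib.define declares an op schema and a registered fake (meta) kernel supplies shape/dtype inference so the graph can be traced without a real kernel. Below is a minimal, self-contained sketch of that pattern, assuming PyTorch >= 2.4 for torch.library.register_fake; the "sketch" namespace and my_vector_norm op are hypothetical, standing in for the file's own lib and register_fake helper.

import torch
from torch.library import Library, register_fake
from torch._subclasses.fake_tensor import FakeTensorMode  # internal API, used only for the demo

# Declare the op schema in a hypothetical "sketch" namespace.
lib = Library("sketch", "DEF")
lib.define("my_vector_norm(Tensor X) -> (Tensor Y)")

@register_fake("sketch::my_vector_norm")
def my_vector_norm_meta(X: torch.Tensor) -> torch.Tensor:
    # The norm reduces over all elements, so the fake kernel returns a
    # 0-d ("scalar") empty tensor of the input dtype; that is all shape
    # propagation needs, with no real kernel registered.
    return X.new_empty([], dtype=X.dtype)

# Shape inference works under fake-tensor tracing even without a real kernel:
with FakeTensorMode():
    x = torch.empty(4, 8)
    y = torch.ops.sketch.my_vector_norm(x)
    assert y.shape == torch.Size([])

With all three registrations removed here (the functional schema, the .out variant, and the fake kernel), cadence::linalg_vector_norm no longer exists as a custom op; graphs would presumably carry the standard ATen linalg_vector_norm instead.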
