@@ -55,8 +55,6 @@ def _validate_ref_impl_exists() -> None:
     _WARN_ONLY = {
         "cadence::quantized_w8a32_linear",
         "cadence::quantized_add",  # We should only support per_tensor variant, should remove
-        "cadence::idma_store",
-        "cadence::idma_load",
         "cadence::_softmax_f32_f32",
         "cadence::requantize",  # We should only support per_tensor variant, should remove
         "cadence::quantized_softmax.per_tensor",
@@ -70,13 +68,11 @@ def _validate_ref_impl_exists() -> None:
         "cadence::quantized_relu",  # We should only support per_tensor variant, should remove
         "cadence::linalg_svd",
         "cadence::quantized_conv2d_nhwc",  # We should only support per_tensor variant, should remove
-        "cadence::idma_copy",
         "cadence::quantize_per_tensor_asym16u",
         "cadence::dequantize_per_tensor_asym8s",
         "cadence::quantize_per_tensor_asym16s",
         "cadence::dequantize_per_tensor_asym16s",
         "cadence::quantized_softmax",
-        "cadence::idma_wait",
         "cadence::quantized_w8a32_gru",
         "cadence::quantized_layer_norm",  # We should only support per_tensor variant, should remove
     }
@@ -2003,6 +1999,7 @@ def im2row_per_tensor_meta(
     )
     return input.new_empty(output_size, dtype=input.dtype)
 
+
 @register_fake("cadence::linalg_svd")
 def linalg_svd_meta(
     A: torch.Tensor,
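
For context on the last hunk: a function registered via @register_fake is a fake (meta) kernel, which computes only the output shapes and dtypes of a custom op so it can be traced, exported, and shape-propagated without running the real backend kernel. Below is a minimal, self-contained sketch of that pattern; the "demo" namespace, op schema, and reduced-SVD shape logic are illustrative assumptions, not the actual cadence::linalg_svd registration shown in the diff.

import torch
from torch.library import Library, register_fake

# Hypothetical op in a throwaway "demo" namespace (assumption: not the
# real Cadence op library).
lib = Library("demo", "DEF")
lib.define("linalg_svd(Tensor A) -> (Tensor, Tensor, Tensor)")

@register_fake("demo::linalg_svd")
def linalg_svd_fake(A: torch.Tensor):
    # Fake kernel: allocate empty outputs with the shapes a reduced SVD
    # of an (..., m, n) input would produce; no numerics are computed.
    m, n = A.shape[-2], A.shape[-1]
    k = min(m, n)
    U = A.new_empty(*A.shape[:-2], m, k)
    S = A.new_empty(*A.shape[:-2], k)
    Vh = A.new_empty(*A.shape[:-2], k, n)
    return U, S, Vh

Under a fake-tensor tracing mode (e.g. torch._subclasses.FakeTensorMode, as used during export), calling torch.ops.demo.linalg_svd dispatches to this fake kernel and yields correctly shaped empty tensors.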