From 3d520e25aaabd2266c98f0a21e7711f1dbe59c4b Mon Sep 17 00:00:00 2001 From: Scott McKay Date: Thu, 4 Feb 2021 17:53:38 +1000 Subject: [PATCH] Add more kernels that have typed registrations to the operators we track type usage for. --- .../operator_type_usage_processors.py | 34 +++++++++++++------ 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/tools/python/util/ort_format_model/operator_type_usage_processors.py b/tools/python/util/ort_format_model/operator_type_usage_processors.py index 3c179e33556c4..baee45c9b07fd 100644 --- a/tools/python/util/ort_format_model/operator_type_usage_processors.py +++ b/tools/python/util/ort_format_model/operator_type_usage_processors.py @@ -254,17 +254,32 @@ def add(processor): # - some known large kernels # # Ops we are ignoring currently so as not to produce meaningless/unused output: - # - Implementation is not type specific: - # If, Loop, Reshape, Scan, Shape, Squeeze, Unsqueeze + # - Implementation is type agnostic: + # ai.onnx: If, Loop, Reshape, Scan, Shape, Squeeze, Unsqueeze + # com.microsoft: DynamicQuantizeMatMul, MatMulIntegerToFloat # - Only one type supported in the ORT implementation: - # FusedConv, FusedGemm, FusedMatMul, TransposeMatMul + # com.microsoft: FusedConv, FusedGemm, FusedMatMul, TransposeMatMul # - Implementation does not have any significant type specific code: - # Concat, Flatten, Not, QLinearConv, Reshape, Shape, Squeeze, Unsqueeze - default_processor_onnx_ops = ['Add', 'AveragePool', 'BatchNormalization', 'Clip', 'Conv', - 'DequantizeLinear', 'Div', 'Equal', 'Exp', 'Expand', - 'Gemm', 'Greater', 'Less', 'MatMul', 'Max', 'Min', 'Mul', - 'NonMaxSuppression', 'NonZero', 'Pad', 'Range', 'Relu', 'Resize', - 'Sigmoid', 'Slice', 'Softmax', 'Split', 'Sub', 'Tile', 'TopK', 'Transpose'] + # ai.onnx: Concat, Flatten, Not, QLinearConv, Reshape, Shape, Squeeze, Unsqueeze + # + default_processor_onnx_ops = ['Abs', 'Add', 'ArgMax', 'ArgMin', 'AveragePool', + 'BatchNormalization', 'BitShift', + 
'Ceil', 'Clip', 'Conv', 'CumSum', + 'DequantizeLinear', 'Div', + 'Equal', 'Exp', 'Expand', + 'Floor', + 'Gemm', 'Greater', + 'IsNaN', + 'Less', 'Log', 'LogSoftmax', 'LpNormalization', + 'MatMul', 'Max', 'Min', 'Mul', + 'Neg', 'NonMaxSuppression', 'NonZero', + 'Pad', + 'Range', 'Reciprocal', 'ReduceL1', 'ReduceL2', 'ReduceLogSum', 'ReduceLogSumExp', + 'ReduceMax', 'ReduceMean', 'ReduceMin', 'ReduceProd', 'ReduceSum', 'ReduceSumSquare', + 'Relu', 'Resize', 'RoiAlign', 'Round', + 'Sigmoid', 'Sin', 'Slice', 'Softmax', 'Split', 'Sqrt', 'Sub', + 'Tanh', 'Tile', 'TopK', 'Transpose', + 'Where'] internal_ops = ['QLinearAdd', 'QLinearMul'] @@ -295,7 +310,6 @@ def add(processor): # Operators that switch on output type add(DefaultTypeUsageProcessor('ai.onnx', 'ConstantOfShape', inputs=[], outputs=[0])) - add(DefaultTypeUsageProcessor('com.microsoft', 'DynamicQuantizeMatMul', inputs=[], outputs=[0])) # Random generator ops produce new data so we track the output type onnx_random_ops = ['RandomNormal', 'RandomNormalLike', 'RandomUniform', 'RandomUniformLike', 'Multinomial']