diff --git a/onnxruntime/core/providers/coreml/builders/helper.cc b/onnxruntime/core/providers/coreml/builders/helper.cc
index 9ad0acee49095..45c10130f1aa5 100644
--- a/onnxruntime/core/providers/coreml/builders/helper.cc
+++ b/onnxruntime/core/providers/coreml/builders/helper.cc
@@ -13,17 +13,19 @@
 namespace onnxruntime {
 namespace coreml {
 
-Status GetShape(const NodeArg& node_arg, std::vector<int64_t>& shape) {
-  const auto& input_name = node_arg.Name();
+bool GetShape(const NodeArg& node_arg, std::vector<int64_t>& shape, const logging::Logger& logger) {
   const auto* shape_proto = node_arg.Shape();
-  ORT_RETURN_IF_NOT(shape_proto, "shape_proto cannot be null for input: ", input_name);
+  if (!shape_proto) {
+    LOGS(logger, WARNING) << "NodeArg [" << node_arg.Name() << "] has no shape info";
+    return false;
+  }
 
   // We already checked the shape has no dynamic dimension
   for (const auto& dim : shape_proto->dim()) {
     shape.push_back(dim.dim_value());
   }
 
-  return Status::OK();
+  return true;
 }
 
 // TODO, move this to shared_library
diff --git a/onnxruntime/core/providers/coreml/builders/helper.h b/onnxruntime/core/providers/coreml/builders/helper.h
index a098f8ee76407..9cba62d166692 100644
--- a/onnxruntime/core/providers/coreml/builders/helper.h
+++ b/onnxruntime/core/providers/coreml/builders/helper.h
@@ -16,7 +16,7 @@ class Logger;
 
 namespace coreml {
 
-common::Status GetShape(const NodeArg& node_arg, std::vector<int64_t>& shape);
+bool GetShape(const NodeArg& node_arg, std::vector<int64_t>& shape, const logging::Logger& logger);
 
 // TODO, move this to shared_library
 bool GetType(const NodeArg& node_arg, int32_t& type, const logging::Logger& logger);
diff --git a/onnxruntime/core/providers/coreml/builders/impl/activation_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/activation_op_builder.cc
index 0b35c554392ea..2b01ba433af2b 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/activation_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/activation_op_builder.cc
@@
-42,7 +42,7 @@ Status ActivationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
   *layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
   *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
 
-  model_builder.AddLayer(layer.release());
+  model_builder.AddLayer(std::move(layer));
   return Status::OK();
 }
 
diff --git a/onnxruntime/core/providers/coreml/builders/impl/batch_norm_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/batch_norm_op_builder.cc
new file mode 100644
index 0000000000000..4cfe684c90659
--- /dev/null
+++ b/onnxruntime/core/providers/coreml/builders/impl/batch_norm_op_builder.cc
@@ -0,0 +1,140 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "core/providers/common.h"
+#include "core/providers/shared/utils/utils.h"
+#include "core/providers/coreml/builders/helper.h"
+#include "core/providers/coreml/builders/model_builder.h"
+#include "core/providers/coreml/builders/op_builder_factory.h"
+
+#include "base_op_builder.h"
+#include "builder_utils.h"
+
+namespace onnxruntime {
+namespace coreml {
+
+class BatchNormalizationOpBuilder : public BaseOpBuilder {
+  // Add operator related
+ public:
+  void AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const override;
+
+ private:
+  Status AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
+                               const logging::Logger& logger) const override ORT_MUST_USE_RESULT;
+
+  // Operator support related
+ private:
+  bool IsOpSupportedImpl(const InitializedTensorSet& initializers, const Node& node,
+                         const logging::Logger& logger) const override;
+
+  // BatchNormalization opset 6- has unsupported attributes
+  int GetMinSupportedOpSet(const Node& /* node */) const override { return 7; }
+};
+
+// Add operator related
+
+void BatchNormalizationOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
+  // skip everything except input0 for BatchNormalization
+  const auto& input_defs = node.InputDefs();
+  model_builder.AddInitializerToSkip(input_defs[1]->Name());  // scale
+  model_builder.AddInitializerToSkip(input_defs[2]->Name());  // B
+  model_builder.AddInitializerToSkip(input_defs[3]->Name());  // mean
+  model_builder.AddInitializerToSkip(input_defs[4]->Name());  // var
+}
+
+Status BatchNormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
+                                                          const Node& node,
+                                                          const logging::Logger& /* logger */) const {
+  std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = CreateNNLayer(node);
+
+  const auto& input_defs = node.InputDefs();
+  const auto& initializers(model_builder.GetInitializerTensors());
+  NodeAttrHelper helper(node);
+
+  const auto& scale_tensor = *initializers.at(input_defs[1]->Name());
+  const auto& bias_tensor = *initializers.at(input_defs[2]->Name());
+  const auto& mean_tensor = *initializers.at(input_defs[3]->Name());
+  const auto& var_tensor = *initializers.at(input_defs[4]->Name());
+  const auto eps = helper.Get("epsilon", 1e-5f);
+  const auto channels = scale_tensor.dims()[0];
+
+  auto* coreml_batch_norm = layer->mutable_batchnorm();
+  coreml_batch_norm->set_channels(channels);
+  coreml_batch_norm->set_epsilon(eps);
+  coreml_batch_norm->set_computemeanvar(false);
+  coreml_batch_norm->set_instancenormalization(false);
+
+  CreateCoreMLWeight(*coreml_batch_norm->mutable_gamma(), scale_tensor);    // scale
+  CreateCoreMLWeight(*coreml_batch_norm->mutable_beta(), bias_tensor);     // B
+  CreateCoreMLWeight(*coreml_batch_norm->mutable_mean(), mean_tensor);     // mean
+  CreateCoreMLWeight(*coreml_batch_norm->mutable_variance(), var_tensor);  // var
+
+  *layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
+  *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
+
+  model_builder.AddLayer(std::move(layer));
+  return Status::OK();
+}
+
+// Operator support related
+
+bool BatchNormalizationOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers, const Node& node,
+                                                    const logging::Logger& logger) const {
+  if (node.OutputDefs().size() != 1) {
+    LOGS(logger, VERBOSE) << "Your onnx model may be in training mode, please export "
+                             "it in test mode.";
+    return false;
+  }
+
+  const auto& input_defs = node.InputDefs();
+  std::vector<int64_t> input_shape;
+  if (!GetShape(*input_defs[0], input_shape, logger))
+    return false;
+
+  const auto input_size = input_shape.size();
+  // TODO, support 1d batch normalization (input is 3d)
+  // To map 1d input {N,C,H} to 2d {N,C,H,1} first and then squeeze back after
+  if (input_size != 4) {
+    LOGS(logger, VERBOSE) << "BN only support 4d shape for now, input is "
+                          << input_size << "d shape";
+    return false;
+  }
+
+  NodeAttrHelper helper(node);
+  const auto spatial = helper.Get("spatial", 1);
+  if (spatial != 1) {
+    LOGS(logger, VERBOSE) << "Non-spatial BN is not supported";
+    return false;
+  }
+
+  const auto& scale_name = input_defs[1]->Name();
+  const auto& b_name = input_defs[2]->Name();
+  const auto& mean_name = input_defs[3]->Name();
+  const auto& var_name = input_defs[4]->Name();
+  if (!Contains(initializers, scale_name)) {
+    LOGS(logger, VERBOSE) << "Scale of BN must be a constant initializer";
+    return false;
+  }
+  if (!Contains(initializers, b_name)) {
+    LOGS(logger, VERBOSE) << "B of BN must be a constant initializer";
+    return false;
+  }
+  if (!Contains(initializers, mean_name)) {
+    LOGS(logger, VERBOSE) << "Mean of BN must be a constant initializer";
+    return false;
+  }
+  if (!Contains(initializers, var_name)) {
+    LOGS(logger, VERBOSE) << "Var of BN must be a constant initializer";
+    return false;
+  }
+
+  return true;
+}
+
+void CreateBatchNormalizationOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations) {
+  op_registrations.builders.push_back(onnxruntime::make_unique<BatchNormalizationOpBuilder>());
+  op_registrations.op_builder_map.emplace(op_type, op_registrations.builders.back().get());
+}
+
+}  // namespace coreml
+}  // namespace onnxruntime
diff --git a/onnxruntime/core/providers/coreml/builders/impl/binary_op_builder.cc
b/onnxruntime/core/providers/coreml/builders/impl/binary_op_builder.cc
index b9dd9cb5b1575..ba2b395e4bfb8 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/binary_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/binary_op_builder.cc
@@ -43,7 +43,7 @@ Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const
   *layer->mutable_input()->Add() = input_defs[1]->Name();
   *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
 
-  model_builder.AddLayer(layer.release());
+  model_builder.AddLayer(std::move(layer));
   return Status::OK();
 }
 
diff --git a/onnxruntime/core/providers/coreml/builders/impl/conv_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/conv_op_builder.cc
index 98fb0c3f2e63e..5ac2ce6bbb6a1 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/conv_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/conv_op_builder.cc
@@ -42,7 +42,7 @@ void ConvOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Nod
 }
 
 Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
-                                            const logging::Logger& /* logger */) const {
+                                            const logging::Logger& logger) const {
   std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = CreateNNLayer(node);
 
   const auto input_defs = node.InputDefs();
@@ -72,7 +72,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
     // Usually using autopadding is more efficient than using explicit padding
     // Try to see if we can map explicit padding to auto padding
     std::vector<int64_t> input_shape;
-    ORT_RETURN_IF_ERROR(GetShape(*input_defs[0], input_shape));
+    ORT_RETURN_IF_NOT(GetShape(*input_defs[0], input_shape, logger), "Cannot get shape");
     AutoPadType auto_pad_type;
     ORT_RETURN_IF_ERROR(HandleAutoPad(input_shape, weight_shape[2], weight_shape[3], onnx_pads, strides, dilations,
@@ -110,7 +110,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
   *layer->mutable_input()->Add() =
node.InputDefs()[0]->Name();
   *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
 
-  model_builder.AddLayer(layer.release());
+  model_builder.AddLayer(std::move(layer));
   return Status::OK();
 }
 
diff --git a/onnxruntime/core/providers/coreml/builders/impl/pool_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/pool_op_builder.cc
new file mode 100644
index 0000000000000..5725fb303a3f9
--- /dev/null
+++ b/onnxruntime/core/providers/coreml/builders/impl/pool_op_builder.cc
@@ -0,0 +1,94 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "core/providers/shared/utils/utils.h"
+#include "core/providers/coreml/builders/helper.h"
+#include "core/providers/coreml/builders/model_builder.h"
+#include "core/providers/coreml/builders/op_builder_factory.h"
+
+#include "base_op_builder.h"
+
+namespace onnxruntime {
+namespace coreml {
+
+class PoolOpBuilder : public BaseOpBuilder {
+  // Add operator related
+ private:
+  Status AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
+                               const logging::Logger& logger) const override ORT_MUST_USE_RESULT;
+
+  // Operator support related
+ private:
+  bool IsOpSupportedImpl(const InitializedTensorSet& initializers, const Node& node,
+                         const logging::Logger& logger) const override;
+};
+
+// Add operator related
+
+Status PoolOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
+                                            const Node& node,
+                                            const logging::Logger& /* logger */) const {
+  std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = CreateNNLayer(node);
+
+  auto* coreml_pool = layer->mutable_pooling();
+  const auto& op_type = node.OpType();
+
+  // We only support global pool now
+  coreml_pool->set_globalpooling(true);
+  coreml_pool->mutable_valid();
+
+  if (op_type == "GlobalAveragePool") {
+    coreml_pool->set_type(COREML_SPEC::PoolingLayerParams_PoolingType_AVERAGE);
+  } else if (op_type == "GlobalMaxPool") {
+    coreml_pool->set_type(COREML_SPEC::PoolingLayerParams_PoolingType_MAX);
+  }
+
+  *layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
+  *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
+
+  model_builder.AddLayer(std::move(layer));
+  return Status::OK();
+}
+
+// Operator support related
+bool PoolOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& /* initializers */, const Node& node,
+                                      const logging::Logger& logger) const {
+  const auto& op_type = node.OpType();
+  if (op_type != "GlobalAveragePool" && op_type != "GlobalMaxPool") {
+    LOGS(logger, VERBOSE) << "[" << op_type << "] is not supported";
+    return false;
+  }
+
+  std::vector<int64_t> input_shape;
+  if (!GetShape(*node.InputDefs()[0], input_shape, logger))
+    return false;
+
+  const auto input_size = input_shape.size();
+  if (input_size != 4) {
+    LOGS(logger, VERBOSE)
+        << op_type << " only supports rank-4 tensor, input ["
+        << node.InputDefs()[0]->Name() << "] has actual dim count " << input_size;
+    return false;
+  }
+
+  return true;
+}
+
+void CreatePoolOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations) {
+  if (op_registrations.op_builder_map.find(op_type) != op_registrations.op_builder_map.cend())
+    return;
+
+  static std::vector<std::string> op_types =
+      {
+          "GlobalAveragePool",
+          "GlobalMaxPool",
+      };
+
+  op_registrations.builders.push_back(onnxruntime::make_unique<PoolOpBuilder>());
+  for (const auto& op_type : op_types) {
+    op_registrations.op_builder_map.emplace(op_type, op_registrations.builders.back().get());
+  }
+}
+
+}  // namespace coreml
+}  // namespace onnxruntime
diff --git a/onnxruntime/core/providers/coreml/builders/impl/reshape_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/reshape_op_builder.cc
new file mode 100644
index 0000000000000..a5119890357fa
--- /dev/null
+++ b/onnxruntime/core/providers/coreml/builders/impl/reshape_op_builder.cc
@@ -0,0 +1,106 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "core/providers/common.h"
+#include "core/providers/cpu/tensor/reshape_helper.h"
+
+#include "core/providers/shared/utils/utils.h"
+#include "core/providers/coreml/builders/helper.h"
+#include "core/providers/coreml/builders/model_builder.h"
+#include "core/providers/coreml/builders/op_builder_factory.h"
+
+#include "base_op_builder.h"
+
+namespace onnxruntime {
+namespace coreml {
+
+class ReshapeOpBuilder : public BaseOpBuilder {
+  // Add operator related
+ public:
+  void AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const override;
+
+ private:
+  Status AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
+                               const logging::Logger& logger) const override ORT_MUST_USE_RESULT;
+
+  // Operator support related
+ private:
+  bool IsOpSupportedImpl(const InitializedTensorSet& initializers, const Node& node,
+                         const logging::Logger& logger) const override;
+
+  // Reshape opset 4- uses attributes for new shape which we do not support for now
+  int GetMinSupportedOpSet(const Node& /* node */) const override { return 5; }
+};
+
+// Add operator related
+
+void ReshapeOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
+  model_builder.AddInitializerToSkip(node.InputDefs()[1]->Name());
+}
+
+Status ReshapeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
+                                               const Node& node,
+                                               const logging::Logger& logger) const {
+  std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = CreateNNLayer(node);
+
+  const auto& input_defs = node.InputDefs();
+  const auto& initializers(model_builder.GetInitializerTensors());
+  const auto& target_shape_tensor = *initializers.at(input_defs[1]->Name());
+  const int64_t* raw_target_shape = target_shape_tensor.int64_data().empty()
+                                        ?
reinterpret_cast<const int64_t*>(target_shape_tensor.raw_data().data())
+                                        : target_shape_tensor.int64_data().data();
+
+  const auto size = target_shape_tensor.dims()[0];
+  std::vector<int64_t> target_shape;
+  std::copy(raw_target_shape, raw_target_shape + size, std::back_inserter(target_shape));
+
+  std::vector<int64_t> input_shape;
+  ORT_RETURN_IF_NOT(GetShape(*input_defs[0], input_shape, logger), "Cannot get shape");
+  ReshapeHelper helper(TensorShape(input_shape), target_shape);
+  std::copy(target_shape.cbegin(), target_shape.cend(),
+            google::protobuf::RepeatedFieldBackInserter(
+                layer->mutable_reshapestatic()->mutable_targetshape()));
+
+  *layer->mutable_input()->Add() = input_defs[0]->Name();
+  *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
+
+  model_builder.AddLayer(std::move(layer));
+  return Status::OK();
+}
+
+// Operator support related
+
+bool ReshapeOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers, const Node& node,
+                                         const logging::Logger& logger) const {
+  const auto& input_defs = node.InputDefs();
+  const auto& perm_name = input_defs[1]->Name();
+  if (!Contains(initializers, perm_name)) {
+    LOGS(logger, VERBOSE) << "New shape of reshape must be a constant initializer";
+    return false;
+  }
+
+  const auto& perm_dims = initializers.at(perm_name)->dims();
+  if (perm_dims.empty() || perm_dims[0] == 0) {
+    LOGS(logger, VERBOSE) << "New shape of reshape cannot be empty";
+    return false;
+  }
+
+  std::vector<int64_t> input_shape;
+  if (!GetShape(*input_defs[0], input_shape, logger))
+    return false;
+
+  if (input_shape.empty()) {
+    LOGS(logger, VERBOSE) << "Reshape does not support empty input shape";
+    return false;
+  }
+
+  return true;
+}
+
+void CreateReshapeOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations) {
+  op_registrations.builders.push_back(onnxruntime::make_unique<ReshapeOpBuilder>());
+  op_registrations.op_builder_map.emplace(op_type, op_registrations.builders.back().get());
+}
+
+}  // namespace coreml
+}  // namespace onnxruntime
diff --git
a/onnxruntime/core/providers/coreml/builders/impl/transpose_op_builder.cc b/onnxruntime/core/providers/coreml/builders/impl/transpose_op_builder.cc
index 300a786e90ddb..f544fbf51b51a 100644
--- a/onnxruntime/core/providers/coreml/builders/impl/transpose_op_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/impl/transpose_op_builder.cc
@@ -22,13 +22,13 @@ class TransposeOpBuilder : public BaseOpBuilder {
 
 Status TransposeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                                  const Node& node,
-                                                 const logging::Logger& /* logger */) const {
+                                                 const logging::Logger& logger) const {
   std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = CreateNNLayer(node);
 
   NodeAttrHelper helper(node);
   std::vector<int64_t> perm = helper.Get("perm", std::vector<int64_t>());
   std::vector<int64_t> input_shape;
-  ORT_RETURN_IF_ERROR(GetShape(*node.InputDefs()[0], input_shape));
+  ORT_RETURN_IF_NOT(GetShape(*node.InputDefs()[0], input_shape, logger), "Cannot get shape");
   auto input_dims = input_shape.size();
   if (perm.empty()) {
     for (int64_t i = input_dims - 1; i >= 0; i--)
@@ -42,7 +42,7 @@ Status TransposeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
   *layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
   *layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
 
-  model_builder.AddLayer(layer.release());
+  model_builder.AddLayer(std::move(layer));
   return Status::OK();
 }
 
diff --git a/onnxruntime/core/providers/coreml/builders/model_builder.cc b/onnxruntime/core/providers/coreml/builders/model_builder.cc
index d8779a38418ba..97c6ade7903cb 100644
--- a/onnxruntime/core/providers/coreml/builders/model_builder.cc
+++ b/onnxruntime/core/providers/coreml/builders/model_builder.cc
@@ -83,7 +83,7 @@ Status ModelBuilder::RegisterInitializers() {
     CreateCoreMLWeight(*constant_tensor->mutable_data(), tensor);
     *layer->mutable_output()->Add() = name;
 
-    AddLayer(layer.release());
+    AddLayer(std::move(layer));
   }
 
   return Status::OK();
@@ -217,9 +217,9 @@ void ModelBuilder::AddScalarOutput(const std::string& output_name) {
scalar_outputs_.insert(output_name); } -void ModelBuilder::AddLayer(COREML_SPEC::NeuralNetworkLayer* layer) { +void ModelBuilder::AddLayer(std::unique_ptr layer) { auto* neural_network = coreml_model_->mutable_neuralnetwork(); - neural_network->mutable_layers()->AddAllocated(layer); + neural_network->mutable_layers()->AddAllocated(layer.release()); } void ModelBuilder::AddInitializerToSkip(const std::string& tensor_name) { diff --git a/onnxruntime/core/providers/coreml/builders/model_builder.h b/onnxruntime/core/providers/coreml/builders/model_builder.h index 6be3a0bb744cf..2ce5e920149dd 100644 --- a/onnxruntime/core/providers/coreml/builders/model_builder.h +++ b/onnxruntime/core/providers/coreml/builders/model_builder.h @@ -27,7 +27,7 @@ class ModelBuilder { const GraphViewer& GetGraphViewer() const { return graph_viewer_; } const InitializedTensorSet& GetInitializerTensors() const { return graph_viewer_.GetAllInitializedTensors(); } - void AddLayer(COREML_SPEC::NeuralNetworkLayer* layer); + void AddLayer(std::unique_ptr layer); // The initializer will be processed separately, skip it as an initializer void AddInitializerToSkip(const std::string& tensor_name); diff --git a/onnxruntime/core/providers/coreml/builders/op_builder_factory.cc b/onnxruntime/core/providers/coreml/builders/op_builder_factory.cc index 6596282e3c9fe..237c8825a7756 100644 --- a/onnxruntime/core/providers/coreml/builders/op_builder_factory.cc +++ b/onnxruntime/core/providers/coreml/builders/op_builder_factory.cc @@ -33,6 +33,20 @@ static OpBuilderRegistrations CreateOpBuilderRegistrations() { CreateConvOpBuilder("Conv", op_registrations); } + { // Batch Normalization + CreateBatchNormalizationOpBuilder("BatchNormalization", op_registrations); + } + + { // Reshape + CreateReshapeOpBuilder("Reshape", op_registrations); + } + + { // Pool + // TODO support AveragePool and MaxPool + CreatePoolOpBuilder("GlobalAveragePool", op_registrations); + CreatePoolOpBuilder("GlobalMaxPool", 
op_registrations);
+  }
+
   return op_registrations;
 }
 
diff --git a/onnxruntime/core/providers/coreml/builders/op_builder_factory.h b/onnxruntime/core/providers/coreml/builders/op_builder_factory.h
index 553c1bc1bd2f8..5540c7cb944cc 100644
--- a/onnxruntime/core/providers/coreml/builders/op_builder_factory.h
+++ b/onnxruntime/core/providers/coreml/builders/op_builder_factory.h
@@ -21,8 +21,11 @@ const std::unordered_map<std::string, const IOpBuilder*>& GetOpBuilders();
 void CreateBinaryOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
 void CreateTransposeOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
 void CreateConvOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
+void CreateBatchNormalizationOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
+void CreateReshapeOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
 void CreateActivationOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
+void CreatePoolOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
 
 }  // namespace coreml
 }  // namespace onnxruntime
\ No newline at end of file