diff --git a/cmake/external/onnx b/cmake/external/onnx
index 0a4d5abdf4939..0c8d857bb1624 160000
--- a/cmake/external/onnx
+++ b/cmake/external/onnx
@@ -1 +1 @@
-Subproject commit 0a4d5abdf4939ab0842a5eadcc16a3bf0738f901
+Subproject commit 0c8d857bb162431912b255d5c0e773fb7c131a65
diff --git a/onnxruntime/core/framework/onnxruntime_typeinfo.cc b/onnxruntime/core/framework/onnxruntime_typeinfo.cc
index 250d83e4dcac1..655bc840fceb0 100644
--- a/onnxruntime/core/framework/onnxruntime_typeinfo.cc
+++ b/onnxruntime/core/framework/onnxruntime_typeinfo.cc
@@ -52,7 +52,7 @@ OrtStatus* OrtTypeInfo::FromDataTypeImpl(const onnxruntime::DataTypeImpl* input,
   return OrtCreateStatus(ORT_NOT_IMPLEMENTED, "not implemented");
 }
 
-const DataTypeImpl* ElementTypeFromProto(ONNX_NAMESPACE::TensorProto_DataType type) {
+const DataTypeImpl* ElementTypeFromProto(int type) {
   switch (type) {
     case ONNX_NAMESPACE::TensorProto_DataType_FLOAT:
       return DataTypeImpl::GetType<float>();
diff --git a/onnxruntime/core/graph/contrib_ops/range_schema_defs.cc b/onnxruntime/core/graph/contrib_ops/range_schema_defs.cc
index fc31ee4f94371..e5524281b223e 100644
--- a/onnxruntime/core/graph/contrib_ops/range_schema_defs.cc
+++ b/onnxruntime/core/graph/contrib_ops/range_schema_defs.cc
@@ -81,7 +81,7 @@ static int64_t CalcRangeDim(const TensorProto* startShapeInitializer,
 static int64_t CalcResultDim(const TensorProto* startShapeInitializer,
                              const TensorProto* limitShapeInitializer,
                              const TensorProto* deltaShapeInitializer,
-                             TensorProto_DataType dtype) {
+                             int dtype) {
   int64_t dim = -1LL;
   if (dtype == TensorProto::FLOAT) {
     dim = CalcRangeDim<float>(startShapeInitializer, limitShapeInitializer, deltaShapeInitializer);
@@ -146,7 +146,7 @@ OpSchema& RegisterRangeOpSchema(OpSchema&& op_schema){
         const TensorProto* limitShapeInitializer = ctx.getInputData(1);
         const TensorProto* deltaShapeInitializer = (ctx.getNumInputs() > 2) ? ctx.getInputData(2) : nullptr;
         const auto& startTensorType = ctx.getInputType(0)->tensor_type();
-        TensorProto_DataType dtype = startTensorType.elem_type();
+        int dtype = startTensorType.elem_type();
         int64_t n = CalcResultDim(startShapeInitializer, limitShapeInitializer, deltaShapeInitializer, dtype);
         dim.set_dim_value(n);
diff --git a/onnxruntime/core/graph/graph.cc b/onnxruntime/core/graph/graph.cc
index 08b0a1d6e0b9d..cd3cfaa4c2bde 100644
--- a/onnxruntime/core/graph/graph.cc
+++ b/onnxruntime/core/graph/graph.cc
@@ -152,8 +152,8 @@ common::Status NodeArg::UpdateTypeAndShape(const ONNX_NAMESPACE::TypeProto& inpu
       if (input_tensor_elem_type != current_tensor_elem_type)
         return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Tensor element type mismatch. ",
-                               TensorProto_DataType_Name(input_tensor_elem_type), " != ",
-                               TensorProto_DataType_Name(current_tensor_elem_type));
+                               TensorProto_DataType_Name(static_cast<TensorProto_DataType>(input_tensor_elem_type)), " != ",
+                               TensorProto_DataType_Name(static_cast<TensorProto_DataType>(current_tensor_elem_type)));
 
       if (input_tensor_type.has_shape()) {
         auto& current_tensor_type = *current_type.mutable_tensor_type();
@@ -172,8 +172,8 @@ common::Status NodeArg::UpdateTypeAndShape(const ONNX_NAMESPACE::TypeProto& inpu
       const auto current_tensor_elem_type = current_type.sparse_tensor_type().elem_type();
       if (input_tensor_elem_type != current_tensor_elem_type) {
         return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "SparseTensor element type mismatch. ",
-                               TensorProto_DataType_Name(input_tensor_elem_type), " != ",
-                               TensorProto_DataType_Name(current_tensor_elem_type));
+                               TensorProto_DataType_Name(static_cast<TensorProto_DataType>(input_tensor_elem_type)), " != ",
+                               TensorProto_DataType_Name(static_cast<TensorProto_DataType>(current_tensor_elem_type)));
       }
       if (input_tensor_type.has_shape()) {
         auto& current_tensor_type = *current_type.mutable_sparse_tensor_type();
diff --git a/onnxruntime/core/graph/initializer.h b/onnxruntime/core/graph/initializer.h
index 08b7951acda61..d6c15b2cd49ea 100644
--- a/onnxruntime/core/graph/initializer.h
+++ b/onnxruntime/core/graph/initializer.h
@@ -128,11 +128,11 @@ class Initializer final {
     }
   }
 
-  ONNX_NAMESPACE::TensorProto_DataType data_type() const {
+  int data_type() const {
     return data_type_;
   }
 
-  ONNX_NAMESPACE::TensorProto_DataType& data_type() {
+  int& data_type() {
     return data_type_;
   }
@@ -372,7 +372,7 @@ class Initializer final {
   }
 
 private:
-  ONNX_NAMESPACE::TensorProto_DataType data_type_;
+  int data_type_;
   std::string name_;
   std::vector<int64_t> dims_;
   int64_t size_;
diff --git a/onnxruntime/core/protobuf/onnx-ml.proto b/onnxruntime/core/protobuf/onnx-ml.proto
index 79edbb6ef49ce..10463ddf63409 100644
--- a/onnxruntime/core/protobuf/onnx-ml.proto
+++ b/onnxruntime/core/protobuf/onnx-ml.proto
@@ -330,7 +330,7 @@ message TensorProto {
   repeated int64 dims = 1;
 
   // The data type of the tensor.
-  optional DataType data_type = 2;
+  optional int32 data_type = 2;
 
   // For very large tensors, we may want to store them in chunks, in which
   // case the following fields will specify the segment that is stored in
@@ -438,7 +438,7 @@ message TypeProto {
   message Tensor {
     // This field MUST NOT have the value of UNDEFINED
    // This field MUST be present for this version of the IR.
-    optional TensorProto.DataType elem_type = 1;
+    optional int32 elem_type = 1;
     optional TensorShapeProto shape = 2;
   }
 
@@ -454,7 +454,7 @@ message TypeProto {
   message Map {
     // This field MUST be present for this version of the IR.
     // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING
-    optional TensorProto.DataType key_type = 1;
+    optional int32 key_type = 1;
     // This field MUST be present for this version of the IR.
     optional TypeProto value_type = 2;
   };
@@ -469,10 +469,10 @@ message TypeProto {
   // repeated TypeProto parameters = 3;
   }
 
-  message SparseTensor {
-    // This field MUST NOT have the value of UNDEFINED
-    // This field MUST be present for this version of the IR.
-    optional TensorProto.DataType elem_type = 1;
+  message SparseTensor {
+    // This field MUST NOT have the value of UNDEFINED
+    // This field MUST be present for this version of the IR.
+    optional int32 elem_type = 1;
     optional TensorShapeProto shape = 2;
   }
diff --git a/onnxruntime/test/util/compare_mlvalue.cc b/onnxruntime/test/util/compare_mlvalue.cc
index 1b518b7413be7..a5c6f581d597a 100644
--- a/onnxruntime/test/util/compare_mlvalue.cc
+++ b/onnxruntime/test/util/compare_mlvalue.cc
@@ -26,7 +26,7 @@ using namespace onnxruntime;
 
 namespace {
 
-ONNXTensorElementDataType CApiElementTypeFromProto(ONNX_NAMESPACE::TensorProto_DataType type) {
+ONNXTensorElementDataType CApiElementTypeFromProto(int type) {
   switch (type) {
     CASE_TYPE(FLOAT)
     CASE_TYPE(UINT8)
diff --git a/tools/ci_build/github/linux/docker/scripts/install_deps.sh b/tools/ci_build/github/linux/docker/scripts/install_deps.sh
index 364cbb8bf8d19..d092283f78010 100755
--- a/tools/ci_build/github/linux/docker/scripts/install_deps.sh
+++ b/tools/ci_build/github/linux/docker/scripts/install_deps.sh
@@ -33,8 +33,8 @@ else
   #Install ONNX
   #5af210ca8a1c73aa6bae8754c9346ec54d0a756e is v1.2.3
   #bae6333e149a59a3faa9c4d9c44974373dcf5256 is v1.3.0
-  #0a4d5abdf4939ab0842a5eadcc16a3bf0738f901 is v1.3.0 latest
-  for onnx_version in "5af210ca8a1c73aa6bae8754c9346ec54d0a756e" "bae6333e149a59a3faa9c4d9c44974373dcf5256" "0a4d5abdf4939ab0842a5eadcc16a3bf0738f901"; do
+  #0c8d857bb162431912b255d5c0e773fb7c131a65 is v1.3.0 latest
+  for onnx_version in "5af210ca8a1c73aa6bae8754c9346ec54d0a756e" "bae6333e149a59a3faa9c4d9c44974373dcf5256" "0c8d857bb162431912b255d5c0e773fb7c131a65"; do
     if [ -z ${lastest_onnx_version+x} ]; then
       echo "first pass";
     else
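Note (illustrative, not part of the patch): with data_type/elem_type now declared as int32 in onnx-ml.proto, the generated accessors return plain int, so call sites that still need the enum (for example TensorProto_DataType_Name, as in the graph.cc hunks above) must cast the value back explicitly. A minimal sketch of that pattern follows; the helper name ElemTypeName and the include path are assumptions for illustration only.

// Sketch only: assumes the header generated from onnxruntime/core/protobuf/onnx-ml.proto
// is reachable under this path; adjust to the build's actual include layout.
#include <string>
#include "onnx/onnx-ml.pb.h"

// elem_type() now returns int instead of TensorProto_DataType, so cast back
// to the enum before asking protobuf for its symbolic name.
std::string ElemTypeName(const ONNX_NAMESPACE::TypeProto_Tensor& tensor_type) {
  const int elem_type = tensor_type.elem_type();
  return ONNX_NAMESPACE::TensorProto_DataType_Name(
      static_cast<ONNX_NAMESPACE::TensorProto_DataType>(elem_type));
}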