diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/ExecutionProviderFactory.cs b/csharp/src/Microsoft.ML.OnnxRuntime/ExecutionProviderFactory.cs index 409076fb6d5cd..f96f2694e52bb 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/ExecutionProviderFactory.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/ExecutionProviderFactory.cs @@ -16,7 +16,7 @@ public CpuExecutionProviderFactory(bool useArena=true) int useArenaInt = useArena ? 1 : 0; try { - NativeApiStatus.VerifySuccess(NativeMethods.ONNXRuntimeCreateCpuExecutionProviderFactory(useArenaInt, out handle)); + NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateCpuExecutionProviderFactory(useArenaInt, out handle)); } catch(OnnxRuntimeException e) { @@ -48,7 +48,7 @@ public MklDnnExecutionProviderFactory(bool useArena = true) int useArenaInt = useArena ? 1 : 0; try { - NativeApiStatus.VerifySuccess(NativeMethods.ONNXRuntimeCreateMkldnnExecutionProviderFactory(useArenaInt, out handle)); + NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateMkldnnExecutionProviderFactory(useArenaInt, out handle)); } catch (OnnxRuntimeException e) { diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/InferenceSession.cs b/csharp/src/Microsoft.ML.OnnxRuntime/InferenceSession.cs index e7921f9d8cb16..c3382f3d4b3dc 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/InferenceSession.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/InferenceSession.cs @@ -44,7 +44,7 @@ public InferenceSession(string modelPath, SessionOptions options) _nativeHandle = IntPtr.Zero; try { - NativeApiStatus.VerifySuccess(NativeMethods.ONNXRuntimeCreateInferenceSession(envHandle, modelPath, options.NativeHandle, out _nativeHandle)); + NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateInferenceSession(envHandle, modelPath, options.NativeHandle, out _nativeHandle)); // Initialize input/output metadata _inputMetadata = new Dictionary(); @@ -52,7 +52,7 @@ public InferenceSession(string modelPath, SessionOptions options) // get input count ulong inputCount = 0; - NativeApiStatus.VerifySuccess(NativeMethods.ONNXRuntimeInferenceSessionGetInputCount(_nativeHandle, out inputCount)); + NativeApiStatus.VerifySuccess(NativeMethods.OrtInferenceSessionGetInputCount(_nativeHandle, out inputCount)); // get all the output names for (ulong i = 0; i < inputCount; i++) @@ -62,7 +62,7 @@ public InferenceSession(string modelPath, SessionOptions options) // get output count ulong outputCount = 0; - NativeApiStatus.VerifySuccess(NativeMethods.ONNXRuntimeInferenceSessionGetOutputCount(_nativeHandle, out outputCount)); + NativeApiStatus.VerifySuccess(NativeMethods.OrtInferenceSessionGetOutputCount(_nativeHandle, out outputCount)); // get all the output names for (ulong i = 0; i < outputCount; i++) @@ -148,7 +148,7 @@ internal IReadOnlyCollection Run(IReadOnlyCollection internal void FreeMemory(IntPtr memory) { - NativeMethods.ONNXRuntimeAllocatorFree(handle, memory); + NativeMethods.OrtAllocatorFree(handle, memory); } public override bool IsInvalid @@ -141,7 +141,7 @@ protected NativeMemoryAllocator(IntPtr allocator) protected static void Delete(IntPtr allocator) { - NativeMethods.ONNXRuntimeReleaseObject(allocator); + NativeMethods.OrtReleaseObject(allocator); } protected override bool ReleaseHandle() diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/NativeMethods.cs b/csharp/src/Microsoft.ML.OnnxRuntime/NativeMethods.cs index cb089b7bb32f6..861e717168614 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/NativeMethods.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/NativeMethods.cs @@ -17,7 +17,7 @@ internal static class 
NativeMethods #region Runtime/Environment API [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /* ONNXStatus* */ONNXRuntimeInitialize( + public static extern IntPtr /* ONNXStatus* */OrtInitialize( LogLevel default_warning_level, string logId, out IntPtr /*(ONNXEnv*)*/ env); @@ -28,10 +28,10 @@ internal static class NativeMethods #region Status API [DllImport(nativeLib, CharSet = charSet)] - public static extern ErrorCode ONNXRuntimeGetErrorCode(IntPtr /*(ONNXStatus*)*/status); + public static extern ErrorCode OrtGetErrorCode(IntPtr /*(ONNXStatus*)*/status); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /* char* */ONNXRuntimeGetErrorMessage(IntPtr /* (ONNXStatus*) */status); + public static extern IntPtr /* char* */OrtGetErrorMessage(IntPtr /* (ONNXStatus*) */status); // returns char*, need to convert to string by the caller. // does not free the underlying ONNXStatus* @@ -43,14 +43,14 @@ internal static class NativeMethods #region InferenceSession API [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /* ONNXStatus* */ONNXRuntimeCreateInferenceSession( + public static extern IntPtr /* ONNXStatus* */OrtCreateInferenceSession( IntPtr /* (ONNXEnv*) */ environment, [MarshalAs(UnmanagedType.LPWStr)]string modelPath, //the model path is consumed as a wchar* in the C-api - IntPtr /* (ONNXRuntimeSessionOptions*) */sessopnOptions, + IntPtr /* (OrtSessionOptions*) */sessionOptions, out IntPtr /**/ session); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNStatus*)*/ ONNXRuntimeRunInference( + public static extern IntPtr /*(ONNXStatus*)*/ OrtRunInference( IntPtr /*(ONNXSession*)*/ session, IntPtr /*(ONNXSessionRunOptions*)*/ runOptions, // can be null to use the default options string[] inputNames, @@ -65,43 +65,43 @@ IntPtr[] outputValues /* An array of output value pointers.
Array must be alloca [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeInferenceSessionGetInputCount( + public static extern IntPtr /*(ONNXStatus*)*/ OrtInferenceSessionGetInputCount( IntPtr /*(ONNXSession*)*/ session, out ulong /* TODO: size_t */ count); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeInferenceSessionGetOutputCount( + public static extern IntPtr /*(ONNXStatus*)*/ OrtInferenceSessionGetOutputCount( IntPtr /*(ONNXSession*)*/ session, out ulong /*TODO: size_t port*/ count); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ONNXRuntimeInferenceSessionGetInputName( + public static extern IntPtr /*(ONNXStatus*)*/OrtInferenceSessionGetInputName( IntPtr /*(ONNXSession*)*/ session, ulong index, //TODO: port size_t - IntPtr /*(ONNXRuntimeAllocator*)*/ allocator, + IntPtr /*(OrtAllocator*)*/ allocator, out IntPtr /*(char**)*/name); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ONNXRuntimeInferenceSessionGetOutputName( + public static extern IntPtr /*(ONNXStatus*)*/OrtInferenceSessionGetOutputName( IntPtr /*(ONNXSession*)*/ session, ulong index, //TODO: port size_t - IntPtr /*(ONNXRuntimeAllocator*)*/ allocator, + IntPtr /*(OrtAllocator*)*/ allocator, out IntPtr /*(char**)*/name); - // release the typeinfo using ONNXRuntimeReleaseObject + // release the typeinfo using OrtReleaseObject [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ONNXRuntimeInferenceSessionGetInputTypeInfo( + public static extern IntPtr /*(ONNXStatus*)*/OrtInferenceSessionGetInputTypeInfo( IntPtr /*(const ONNXSession*)*/ session, ulong index, //TODO: port for size_t - out IntPtr /*(struct ONNXRuntimeTypeInfo**)*/ typeInfo); + out IntPtr /*(struct OrtTypeInfo**)*/ typeInfo); - // release the typeinfo using ONNXRuntimeReleaseObject + // release the typeinfo using OrtReleaseObject [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ONNXRuntimeInferenceSessionGetOutputTypeInfo( + public static extern IntPtr /*(ONNXStatus*)*/OrtInferenceSessionGetOutputTypeInfo( IntPtr /*(const ONNXSession*)*/ session, ulong index, //TODO: port for size_t - out IntPtr /* (struct ONNXRuntimeTypeInfo**)*/ typeInfo); + out IntPtr /* (struct OrtTypeInfo**)*/ typeInfo); [DllImport(nativeLib, CharSet = charSet)] @@ -111,46 +111,46 @@ IntPtr[] outputValues /* An array of output value pointers. 
Array must be alloca #region SessionOptions API - //Release using ONNXRuntimeReleaseObject + //Release using OrtReleaseObject [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*ONNXRuntimeSessionOptions* */ ONNXRuntimeCreateSessionOptions(); + public static extern IntPtr /*OrtSessionOptions* */ OrtCreateSessionOptions(); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXRuntimeSessionOptions*)*/ONNXRuntimeCloneSessionOptions(IntPtr /*(ONNXRuntimeSessionOptions*)*/ sessionOptions); + public static extern IntPtr /*(OrtSessionOptions*)*/OrtCloneSessionOptions(IntPtr /*(OrtSessionOptions*)*/ sessionOptions); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeEnableSequentialExecution(IntPtr /*(ONNXRuntimeSessionOptions*)*/ options); + public static extern void OrtEnableSequentialExecution(IntPtr /*(OrtSessionOptions*)*/ options); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeDisableSequentialExecution(IntPtr /*(ONNXRuntimeSessionOptions*)*/ options); + public static extern void OrtDisableSequentialExecution(IntPtr /*(OrtSessionOptions*)*/ options); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeEnableProfiling(IntPtr /* ONNXRuntimeSessionOptions* */ options, string profilePathPrefix); + public static extern void OrtEnableProfiling(IntPtr /* OrtSessionOptions* */ options, string profilePathPrefix); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeDisableProfiling(IntPtr /* ONNXRuntimeSessionOptions* */ options); + public static extern void OrtDisableProfiling(IntPtr /* OrtSessionOptions* */ options); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeEnableMemPattern(IntPtr /* ONNXRuntimeSessionOptions* */ options); + public static extern void OrtEnableMemPattern(IntPtr /* OrtSessionOptions* */ options); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeDisableMemPattern(IntPtr /* ONNXRuntimeSessionOptions* */ options); + public static extern void OrtDisableMemPattern(IntPtr /* OrtSessionOptions* */ options); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeEnableCpuMemArena(IntPtr /* ONNXRuntimeSessionOptions* */ options); + public static extern void OrtEnableCpuMemArena(IntPtr /* OrtSessionOptions* */ options); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeDisableCpuMemArena(IntPtr /* ONNXRuntimeSessionOptions* */ options); + public static extern void OrtDisableCpuMemArena(IntPtr /* OrtSessionOptions* */ options); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeSetSessionLogId(IntPtr /* ONNXRuntimeSessionOptions* */ options, string logId); + public static extern void OrtSetSessionLogId(IntPtr /* OrtSessionOptions* */ options, string logId); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeSetSessionLogVerbosityLevel(IntPtr /* ONNXRuntimeSessionOptions* */ options, LogLevel sessionLogVerbosityLevel); + public static extern void OrtSetSessionLogVerbosityLevel(IntPtr /* OrtSessionOptions* */ options, LogLevel sessionLogVerbosityLevel); [DllImport(nativeLib, CharSet = charSet)] - public static extern int ONNXRuntimeSetSessionThreadPoolSize(IntPtr /* ONNXRuntimeSessionOptions* */ options, int sessionThreadPoolSize); + public static extern int OrtSetSessionThreadPoolSize(IntPtr /* OrtSessionOptions* */ options, int 
sessionThreadPoolSize); ///** // * The order of invocation indicates the preference order as well. In other words call this method @@ -158,22 +158,22 @@ IntPtr[] outputValues /* An array of output value pointers. Array must be alloca // * Calling this API is optional in which case onnxruntime will use its internal CPU execution provider. // */ [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeSessionOptionsAppendExecutionProvider(IntPtr /*(ONNXRuntimeSessionOptions*)*/ options, IntPtr /* (ONNXRuntimeProviderFactoryPtr*)*/ factory); + public static extern void OrtSessionOptionsAppendExecutionProvider(IntPtr /*(OrtSessionOptions*)*/ options, IntPtr /* (OrtProviderFactoryPtr*)*/ factory); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeCreateCpuExecutionProviderFactory(int use_arena, out IntPtr /*(ONNXRuntimeProviderFactoryPtr*)*/ factory); + public static extern IntPtr /*(ONNXStatus*)*/ OrtCreateCpuExecutionProviderFactory(int use_arena, out IntPtr /*(OrtProviderFactoryPtr*)*/ factory); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeCreateMkldnnExecutionProviderFactory(int use_arena, out IntPtr /*(ONNXRuntimeProviderFactoryPtr**)*/ factory); + public static extern IntPtr /*(ONNXStatus*)*/ OrtCreateMkldnnExecutionProviderFactory(int use_arena, out IntPtr /*(OrtProviderFactoryPtr**)*/ factory); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeCreateCUDAExecutionProviderFactory(int device_id, out IntPtr /*(ONNXRuntimeProviderFactoryPtr**)*/ factory); + public static extern IntPtr /*(ONNXStatus*)*/ OrtCreateCUDAExecutionProviderFactory(int device_id, out IntPtr /*(OrtProviderFactoryPtr**)*/ factory); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeCreateNupharExecutionProviderFactory(int device_id, string target_str, out IntPtr /*(ONNXRuntimeProviderFactoryPtr**)*/ factory); + public static extern IntPtr /*(ONNXStatus*)*/ OrtCreateNupharExecutionProviderFactory(int device_id, string target_str, out IntPtr /*(OrtProviderFactoryPtr**)*/ factory); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeAddCustomOp(IntPtr /*(ONNXRuntimeSessionOptions*)*/ options, string custom_op_path); + public static extern void OrtAddCustomOp(IntPtr /*(OrtSessionOptions*)*/ options, string custom_op_path); #endregion @@ -197,27 +197,27 @@ public enum MemoryType [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /* (ONNXStatus*)*/ ONNXRuntimeCreateAllocatorInfo( + public static extern IntPtr /* (ONNXStatus*)*/ OrtCreateAllocatorInfo( IntPtr /*(const char*) */name, AllocatorType allocatorType, int identifier, MemoryType memType, - out IntPtr /*(ONNXRuntimeAllocatorInfo*)*/ allocatorInfo // memory ownership transfered to caller + out IntPtr /*(OrtAllocatorInfo*)*/ allocatorInfo // memory ownership transferred to caller ); - //ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateCpuAllocatorInfo, enum ONNXRuntimeAllocatorType type, enum ONNXRuntimeMemType mem_type1, _Out_ ONNXRuntimeAllocatorInfo** out) + //ONNXRUNTIME_API_STATUS(OrtCreateCpuAllocatorInfo, enum OrtAllocatorType type, enum OrtMemType mem_type1, _Out_ OrtAllocatorInfo** out) [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /* (ONNXStatus*)*/ ONNXRuntimeCreateCpuAllocatorInfo( + public static extern IntPtr /* (ONNXStatus*)*/ OrtCreateCpuAllocatorInfo(
AllocatorType allocatorType, MemoryType memoryType, - out IntPtr /*(ONNXRuntimeAllocatorInfo*)*/ allocatorInfo + out IntPtr /*(OrtAllocatorInfo*)*/ allocatorInfo ); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ReleaseONNXRuntimeAllocatorInfo(IntPtr /*(ONNXRuntimeAllocatorInfo*)*/ allocatorInfo); + public static extern void ReleaseOrtAllocatorInfo(IntPtr /*(OrtAllocatorInfo*)*/ allocatorInfo); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ONNXRuntimeCreateDefaultAllocator(out IntPtr /*(ONNXRuntimeAllocator**)*/ allocator); + public static extern IntPtr /*(ONNXStatus*)*/OrtCreateDefaultAllocator(out IntPtr /*(OrtAllocator**)*/ allocator); /// /// Releases/Unrefs any object, including the Allocator @@ -225,7 +225,7 @@ public enum MemoryType /// /// remaining ref count [DllImport(nativeLib, CharSet = charSet)] - public static extern uint /*remaining ref count*/ ONNXRuntimeReleaseObject(IntPtr /*(void*)*/ ptr); + public static extern uint /*remaining ref count*/ OrtReleaseObject(IntPtr /*(void*)*/ ptr); /// /// Release any object allocated by an allocator @@ -233,18 +233,18 @@ public enum MemoryType /// /// [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeAllocatorFree(IntPtr allocator, IntPtr memory); + public static extern void OrtAllocatorFree(IntPtr allocator, IntPtr memory); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(const struct ONNXRuntimeAllocatorInfo*)*/ ONNXRuntimeAllocatorGetInfo(IntPtr /*(const ONNXRuntimeAllocator*)*/ ptr); + public static extern IntPtr /*(const struct OrtAllocatorInfo*)*/ OrtAllocatorGetInfo(IntPtr /*(const OrtAllocator*)*/ ptr); #endregion Allocator/AllocatorInfo API #region Tensor/OnnxValue API [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /* ONNXStatus */ ONNXRuntimeCreateTensorWithDataAsONNXValue( - IntPtr /* (const ONNXRuntimeAllocatorInfo*) */ allocatorInfo, + public static extern IntPtr /* ONNXStatus */ OrtCreateTensorWithDataAsONNXValue( + IntPtr /* (const OrtAllocatorInfo*) */ allocatorInfo, IntPtr /* (void*) */dataBufferHandle, ulong dataLength, //size_t, TODO: make it portable for x86, arm ulong[] shape, //size_t* or size_t[], TODO: make it portable for x86, arm @@ -255,30 +255,30 @@ public enum MemoryType /// This function doesn't work with string tensor /// this is a no-copy method whose pointer is only valid until the backing ONNXValuePtr is free'd. 
[DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeGetTensorMutableData(IntPtr /*(ONNXValue*)*/ value, out IntPtr /* (void**)*/ dataBufferHandle); + public static extern IntPtr /*(ONNXStatus*)*/ OrtGetTensorMutableData(IntPtr /*(ONNXValue*)*/ value, out IntPtr /* (void**)*/ dataBufferHandle); //[DllImport(nativeLib, CharSet = charSet)] - //public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeGetTensorShapeDimCount(IntPtr /*(ONNXValue*)*/ value, out ulong dimension); //size_t TODO: make it portable for x86, arm + //public static extern IntPtr /*(ONNXStatus*)*/ OrtGetTensorShapeDimCount(IntPtr /*(ONNXValue*)*/ value, out ulong dimension); //size_t TODO: make it portable for x86, arm //[DllImport(nativeLib, CharSet = charSet)] - //public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeGetTensorShapeElementCount(IntPtr /*(ONNXValue*)*/value, out ulong count); + //public static extern IntPtr /*(ONNXStatus*)*/ OrtGetTensorShapeElementCount(IntPtr /*(ONNXValue*)*/value, out ulong count); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(const struct ONNXRuntimeTensorTypeAndShapeInfo*)*/ - ONNXRuntimeCastTypeInfoToTensorInfo(IntPtr /*(struct ONNXRuntimeTypeInfo*)*/ typeInfo); + public static extern IntPtr /*(const struct OrtTensorTypeAndShapeInfo*)*/ + OrtCastTypeInfoToTensorInfo(IntPtr /*(struct OrtTypeInfo*)*/ typeInfo); [DllImport(nativeLib, CharSet = charSet)] - public static extern IntPtr /*(ONNXStatus*)*/ ONNXRuntimeGetTensorShapeAndType(IntPtr /*(ONNXValue*)*/ value, out IntPtr /*(struct ONNXRuntimeTensorTypeAndShapeInfo*)*/ typeAndShapeInfo); + public static extern IntPtr /*(ONNXStatus*)*/ OrtGetTensorShapeAndType(IntPtr /*(ONNXValue*)*/ value, out IntPtr /*(struct OrtTensorTypeAndShapeInfo*)*/ typeAndShapeInfo); [DllImport(nativeLib, CharSet = charSet)] - public static extern TensorElementType ONNXRuntimeGetTensorElementType(IntPtr /*(const struct ONNXRuntimeTensorTypeAndShapeInfo*)*/ typeAndShapeInfo); + public static extern TensorElementType OrtGetTensorElementType(IntPtr /*(const struct OrtTensorTypeAndShapeInfo*)*/ typeAndShapeInfo); [DllImport(nativeLib, CharSet = charSet)] - public static extern ulong /*TODO: port for size_t */ONNXRuntimeGetNumOfDimensions(IntPtr /*(const struct ONNXRuntimeTensorTypeAndShapeInfo*)*/ typeAndShapeInfo); + public static extern ulong /*TODO: port for size_t */OrtGetNumOfDimensions(IntPtr /*(const struct OrtTensorTypeAndShapeInfo*)*/ typeAndShapeInfo); [DllImport(nativeLib, CharSet = charSet)] - public static extern void ONNXRuntimeGetDimensions( - IntPtr /*(const struct ONNXRuntimeTensorTypeAndShapeInfo*)*/ typeAndShapeInfo, + public static extern void OrtGetDimensions( + IntPtr /*(const struct OrtTensorTypeAndShapeInfo*)*/ typeAndShapeInfo, long[] dim_values, ulong dim_values_length); @@ -292,7 +292,7 @@ public static extern void ONNXRuntimeGetDimensions( * [-1,3,4] -> -1 */ [DllImport(nativeLib, CharSet = charSet)] - public static extern long ONNXRuntimeGetTensorShapeElementCount(IntPtr /*(const struct ONNXRuntimeTensorTypeAndShapeInfo*)*/ typeAndShapeInfo); + public static extern long OrtGetTensorShapeElementCount(IntPtr /*(const struct OrtTensorTypeAndShapeInfo*)*/ typeAndShapeInfo); [DllImport(nativeLib, CharSet = charSet)] public static extern void ReleaseONNXValue(IntPtr /*(ONNXValue*)*/ value); diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/NativeOnnxObjectHandle.cs b/csharp/src/Microsoft.ML.OnnxRuntime/NativeOnnxObjectHandle.cs index f30ded50c7a8c..b2fa4c8f68ce0 
100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/NativeOnnxObjectHandle.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/NativeOnnxObjectHandle.cs @@ -21,7 +21,7 @@ public override bool IsInvalid protected override bool ReleaseHandle() { - NativeMethods.ONNXRuntimeReleaseObject(handle); + NativeMethods.OrtReleaseObject(handle); return true; } } diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/NativeOnnxTensorMemory.cs b/csharp/src/Microsoft.ML.OnnxRuntime/NativeOnnxTensorMemory.cs index 0339ac8871f01..d48d097240a5e 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/NativeOnnxTensorMemory.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/NativeOnnxTensorMemory.cs @@ -27,9 +27,9 @@ public NativeOnnxTensorMemory(IntPtr onnxValueHandle) IntPtr typeAndShape = IntPtr.Zero; try { - NativeApiStatus.VerifySuccess(NativeMethods.ONNXRuntimeGetTensorShapeAndType(onnxValueHandle, out typeAndShape)); + NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorShapeAndType(onnxValueHandle, out typeAndShape)); - TensorElementType elemType = NativeMethods.ONNXRuntimeGetTensorElementType(typeAndShape); + TensorElementType elemType = NativeMethods.OrtGetTensorElementType(typeAndShape); Type type = null; int width = 0; @@ -40,18 +40,18 @@ public NativeOnnxTensorMemory(IntPtr onnxValueHandle) _onnxValueHandle = onnxValueHandle; // derive the databuffer pointer, element_count, element_width, and shape - NativeApiStatus.VerifySuccess(NativeMethods.ONNXRuntimeGetTensorMutableData(_onnxValueHandle, out _dataBufferHandle)); + NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorMutableData(_onnxValueHandle, out _dataBufferHandle)); // throws OnnxRuntimeException if native call failed - ulong dimension = NativeMethods.ONNXRuntimeGetNumOfDimensions(typeAndShape); - long count = NativeMethods.ONNXRuntimeGetTensorShapeElementCount(typeAndShape); // count can be negative. + ulong dimension = NativeMethods.OrtGetNumOfDimensions(typeAndShape); + long count = NativeMethods.OrtGetTensorShapeElementCount(typeAndShape); // count can be negative. 
if (count < 0) { throw new NotSupportedException("Symbolic dimensions in the tensor is not supported"); } long[] shape = new long[dimension]; - NativeMethods.ONNXRuntimeGetDimensions(typeAndShape, shape, dimension); //Note: shape must be alive during the call + NativeMethods.OrtGetDimensions(typeAndShape, shape, dimension); //Note: shape must be alive during the call _elementCount = (int)count; _dimensions = new int[dimension]; @@ -70,7 +70,7 @@ public NativeOnnxTensorMemory(IntPtr onnxValueHandle) { if (typeAndShape != IntPtr.Zero) { - NativeMethods.ONNXRuntimeReleaseObject(typeAndShape); + NativeMethods.OrtReleaseObject(typeAndShape); } } } diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/OnnxRuntime.cs b/csharp/src/Microsoft.ML.OnnxRuntime/OnnxRuntime.cs index f2adfdc41f5c9..da46fc8a8bb13 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/OnnxRuntime.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/OnnxRuntime.cs @@ -54,7 +54,7 @@ public override bool IsInvalid handle = IntPtr.Zero; try { - NativeApiStatus.VerifySuccess(NativeMethods.ONNXRuntimeInitialize(LogLevel.Warning, @"CSharpOnnxRuntime", out handle)); + NativeApiStatus.VerifySuccess(NativeMethods.OrtInitialize(LogLevel.Warning, @"CSharpOnnxRuntime", out handle)); } catch (OnnxRuntimeException e) { diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/SessionOptions.cs b/csharp/src/Microsoft.ML.OnnxRuntime/SessionOptions.cs index 9d7829f490bee..e6cbedd39bedb 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/SessionOptions.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/SessionOptions.cs @@ -30,7 +30,7 @@ public class SessionOptions:IDisposable /// public SessionOptions() { - _nativeOption = new NativeOnnxObjectHandle(NativeMethods.ONNXRuntimeCreateSessionOptions()); + _nativeOption = new NativeOnnxObjectHandle(NativeMethods.OrtCreateSessionOptions()); } /// @@ -90,7 +90,7 @@ private void AppendExecutionProvider(NativeOnnxObjectHandle providerFactory) providerFactory.DangerousAddRef(ref success); if (success) { - NativeMethods.ONNXRuntimeSessionOptionsAppendExecutionProvider(_nativeOption.DangerousGetHandle(), providerFactory.DangerousGetHandle()); + NativeMethods.OrtSessionOptionsAppendExecutionProvider(_nativeOption.DangerousGetHandle(), providerFactory.DangerousGetHandle()); providerFactory.DangerousRelease(); } diff --git a/include/onnxruntime/core/common/common.h b/include/onnxruntime/core/common/common.h index ceb178d1cfe6c..e4c9ad658dc84 100644 --- a/include/onnxruntime/core/common/common.h +++ b/include/onnxruntime/core/common/common.h @@ -44,32 +44,32 @@ using TimePoint = std::chrono::high_resolution_clock::time_point; using common::Status; #ifdef _WIN32 -#define ONNXRUNTIME_UNUSED_PARAMETER(x) (x) +#define ORT_UNUSED_PARAMETER(x) (x) #else -#define ONNXRUNTIME_UNUSED_PARAMETER(x) (void)(x) +#define ORT_UNUSED_PARAMETER(x) (void)(x) #endif -#ifndef ONNXRUNTIME_HAVE_ATTRIBUTE +#ifndef ORT_HAVE_ATTRIBUTE #ifdef __has_attribute -#define ONNXRUNTIME_HAVE_ATTRIBUTE(x) __has_attribute(x) +#define ORT_HAVE_ATTRIBUTE(x) __has_attribute(x) #else -#define ONNXRUNTIME_HAVE_ATTRIBUTE(x) 0 +#define ORT_HAVE_ATTRIBUTE(x) 0 #endif #endif -// ONNXRUNTIME_ATTRIBUTE_UNUSED +// ORT_ATTRIBUTE_UNUSED // // Prevents the compiler from complaining about or optimizing away variables // that appear unused on Linux -#if ONNXRUNTIME_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) -#undef ONNXRUNTIME_ATTRIBUTE_UNUSED -#define ONNXRUNTIME_ATTRIBUTE_UNUSED __attribute__((__unused__)) +#if ORT_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && 
!defined(__clang__)) +#undef ORT_ATTRIBUTE_UNUSED +#define ORT_ATTRIBUTE_UNUSED __attribute__((__unused__)) #else -#define ONNXRUNTIME_ATTRIBUTE_UNUSED +#define ORT_ATTRIBUTE_UNUSED #endif // macro to explicitly ignore the return value from a function call so Code Analysis doesn't complain -#define ONNXRUNTIME_IGNORE_RETURN_VALUE(fn) \ +#define ORT_IGNORE_RETURN_VALUE(fn) \ static_cast(fn) std::vector GetStackTrace(); @@ -81,73 +81,73 @@ std::vector GetStackTrace(); #endif // Capture where a message is coming from. Use __FUNCTION__ rather than the much longer __PRETTY_FUNCTION__ -#define ONNXRUNTIME_WHERE \ +#define ORT_WHERE \ ::onnxruntime::CodeLocation(__FILE__, __LINE__, __FUNCTION__) -#define ONNXRUNTIME_WHERE_WITH_STACK \ +#define ORT_WHERE_WITH_STACK \ ::onnxruntime::CodeLocation(__FILE__, __LINE__, __PRETTY_FUNCTION__, ::onnxruntime::GetStackTrace()) // Throw an exception with optional message. // NOTE: The arguments get streamed into a string via ostringstream::operator<< // DO NOT use a printf format string, as that will not work as you expect. -#define ONNXRUNTIME_THROW(...) \ - throw ::onnxruntime::OnnxRuntimeException(ONNXRUNTIME_WHERE_WITH_STACK, ::onnxruntime::MakeString(__VA_ARGS__)) +#define ORT_THROW(...) \ + throw ::onnxruntime::OnnxRuntimeException(ORT_WHERE_WITH_STACK, ::onnxruntime::MakeString(__VA_ARGS__)) // Just in order to mark things as not implemented. Do not use in final code. -#define ONNXRUNTIME_NOT_IMPLEMENTED(...) \ +#define ORT_NOT_IMPLEMENTED(...) \ throw ::onnxruntime::NotImplementedException(::onnxruntime::MakeString(__VA_ARGS__)) // Check condition. // NOTE: The arguments get streamed into a string via ostringstream::operator<< // DO NOT use a printf format string, as that will not work as you expect. -#define ONNXRUNTIME_ENFORCE(condition, ...) \ - if (!(condition)) \ - throw ::onnxruntime::OnnxRuntimeException(ONNXRUNTIME_WHERE_WITH_STACK, #condition, \ +#define ORT_ENFORCE(condition, ...) \ + if (!(condition)) \ + throw ::onnxruntime::OnnxRuntimeException(ORT_WHERE_WITH_STACK, #condition, \ ::onnxruntime::MakeString(__VA_ARGS__)) -#define ONNXRUNTIME_MAKE_STATUS(category, code, ...) \ +#define ORT_MAKE_STATUS(category, code, ...) \ ::onnxruntime::common::Status(::onnxruntime::common::category, \ ::onnxruntime::common::code, \ ::onnxruntime::MakeString(__VA_ARGS__)) // Check condition. if not met, return status. -#define ONNXRUNTIME_RETURN_IF_NOT(condition, ...) \ - if (!(condition)) { \ - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Not satsified: " #condition "\n", \ - ONNXRUNTIME_WHERE.ToString(), ::onnxruntime::MakeString(__VA_ARGS__)); \ +#define ORT_RETURN_IF_NOT(condition, ...) \ + if (!(condition)) { \ + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Not satsified: " #condition "\n", \ + ORT_WHERE.ToString(), ::onnxruntime::MakeString(__VA_ARGS__)); \ } // Macros to disable the copy and/or move ctor and assignment methods // These are usually placed in the private: declarations for a class. 
-#define ONNXRUNTIME_DISALLOW_COPY(TypeName) TypeName(const TypeName&) = delete +#define ORT_DISALLOW_COPY(TypeName) TypeName(const TypeName&) = delete -#define ONNXRUNTIME_DISALLOW_ASSIGNMENT(TypeName) TypeName& operator=(const TypeName&) = delete +#define ORT_DISALLOW_ASSIGNMENT(TypeName) TypeName& operator=(const TypeName&) = delete -#define ONNXRUNTIME_DISALLOW_COPY_AND_ASSIGNMENT(TypeName) \ - ONNXRUNTIME_DISALLOW_COPY(TypeName); \ - ONNXRUNTIME_DISALLOW_ASSIGNMENT(TypeName) +#define ORT_DISALLOW_COPY_AND_ASSIGNMENT(TypeName) \ + ORT_DISALLOW_COPY(TypeName); \ + ORT_DISALLOW_ASSIGNMENT(TypeName) -#define ONNXRUNTIME_DISALLOW_MOVE(TypeName) \ - TypeName(TypeName&&) = delete; \ +#define ORT_DISALLOW_MOVE(TypeName) \ + TypeName(TypeName&&) = delete; \ TypeName& operator=(TypeName&&) = delete -#define ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TypeName) \ - ONNXRUNTIME_DISALLOW_COPY_AND_ASSIGNMENT(TypeName); \ - ONNXRUNTIME_DISALLOW_MOVE(TypeName) +#define ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TypeName) \ + ORT_DISALLOW_COPY_AND_ASSIGNMENT(TypeName); \ + ORT_DISALLOW_MOVE(TypeName) -#define ONNXRUNTIME_RETURN_IF_ERROR(expr) \ +#define ORT_RETURN_IF_ERROR(expr) \ do { \ auto _status = (expr); \ if ((!_status.IsOK())) return _status; \ } while (0) // use this macro when cannot early return -#define ONNXRUNTIME_CHECK_AND_SET_RETVAL(expr) \ - do { \ - if (retval.IsOK()) { \ - retval = (expr); \ - } \ +#define ORT_CHECK_AND_SET_RETVAL(expr) \ + do { \ + if (retval.IsOK()) { \ + retval = (expr); \ + } \ } while (0) // C++ Core Guideline check suppression diff --git a/include/onnxruntime/core/common/const_pointer_container.h b/include/onnxruntime/core/common/const_pointer_container.h index 9edba9e1cc551..bfc873feb143f 100644 --- a/include/onnxruntime/core/common/const_pointer_container.h +++ b/include/onnxruntime/core/common/const_pointer_container.h @@ -47,7 +47,7 @@ class ConstPointerContainer { const T* operator[](size_t index) const { return data_[index]; } const T* at(size_t index) const { - ONNXRUNTIME_ENFORCE(index < data_.size()); + ORT_ENFORCE(index < data_.size()); return data_[index]; } diff --git a/include/onnxruntime/core/common/logging/capture.h b/include/onnxruntime/core/common/logging/capture.h index dddb36bc00dec..23a2202c6961c 100644 --- a/include/onnxruntime/core/common/logging/capture.h +++ b/include/onnxruntime/core/common/logging/capture.h @@ -101,7 +101,7 @@ class Capture { ~Capture(); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Capture); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Capture); const Logger* logger_; const logging::Severity severity_; diff --git a/include/onnxruntime/core/common/logging/isink.h b/include/onnxruntime/core/common/logging/isink.h index 4951f97fb9081..a67777d4ccc8b 100644 --- a/include/onnxruntime/core/common/logging/isink.h +++ b/include/onnxruntime/core/common/logging/isink.h @@ -33,7 +33,7 @@ class ISink { private: // Make Code Analysis happy by disabling all for now. Enable as needed. 
- ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ISink); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ISink); virtual void SendImpl(const Timestamp& timestamp, const std::string& logger_id, const Capture& message) = 0; }; diff --git a/include/onnxruntime/core/common/logging/logging.h b/include/onnxruntime/core/common/logging/logging.h index a8cb3f7c24e98..3c808b9adbeb2 100644 --- a/include/onnxruntime/core/common/logging/logging.h +++ b/include/onnxruntime/core/common/logging/logging.h @@ -96,7 +96,7 @@ namespace logging { using Timestamp = std::chrono::time_point; #ifndef NDEBUG -ONNXRUNTIME_ATTRIBUTE_UNUSED static bool vlog_enabled = true; // Set directly based on your needs. +ORT_ATTRIBUTE_UNUSED static bool vlog_enabled = true; // Set directly based on your needs. #else constexpr bool vlog_enabled = false; // no VLOG output #endif @@ -202,7 +202,7 @@ class LoggingManager final { ~LoggingManager(); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(LoggingManager); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(LoggingManager); Timestamp GetTimestamp() const noexcept; void CreateDefaultLogger(const std::string& logger_id); diff --git a/include/onnxruntime/core/common/logging/macros.h b/include/onnxruntime/core/common/logging/macros.h index 577a3a97d1d71..570bc14fa8b55 100644 --- a/include/onnxruntime/core/common/logging/macros.h +++ b/include/onnxruntime/core/common/logging/macros.h @@ -5,7 +5,7 @@ // NOTE: Don't include this file directly. Include logging.h #define CREATE_MESSAGE(logger, severity, category, datatype) \ - ::onnxruntime::logging::Capture(logger, ::onnxruntime::logging::Severity::k##severity, category, datatype, ONNXRUNTIME_WHERE) + ::onnxruntime::logging::Capture(logger, ::onnxruntime::logging::Severity::k##severity, category, datatype, ORT_WHERE) /* Both printf and stream style logging are supported. diff --git a/include/onnxruntime/core/framework/allocator.h b/include/onnxruntime/core/framework/allocator.h index 344ec60c47d9b..35e960a6cf3f6 100644 --- a/include/onnxruntime/core/framework/allocator.h +++ b/include/onnxruntime/core/framework/allocator.h @@ -15,14 +15,14 @@ #include "core/framework/fence.h" #include "core/session/onnxruntime_c_api.h" -struct ONNXRuntimeAllocatorInfo { +struct OrtAllocatorInfo { // use string for name, so we could have customized allocator in execution provider. 
const char* name; int id; - ONNXRuntimeMemType mem_type; - ONNXRuntimeAllocatorType type; + OrtMemType mem_type; + OrtAllocatorType type; - constexpr ONNXRuntimeAllocatorInfo(const char* name1, ONNXRuntimeAllocatorType type, int id1 = 0, ONNXRuntimeMemType mem_type1 = ONNXRuntimeMemTypeDefault) + constexpr OrtAllocatorInfo(const char* name1, OrtAllocatorType type, int id1 = 0, OrtMemType mem_type1 = OrtMemTypeDefault) #if (defined(__GNUC__) || defined(__clang__)) __attribute__((nonnull)) #endif @@ -32,12 +32,12 @@ struct ONNXRuntimeAllocatorInfo { type(type) { } - inline bool operator==(const ONNXRuntimeAllocatorInfo& other) const { + inline bool operator==(const OrtAllocatorInfo& other) const { return mem_type == other.mem_type && type == other.type && id == other.id && strcmp(name, other.name) == 0; } - // To make ONNXRuntimeAllocatorInfo become a valid key in std map - inline bool operator<(const ONNXRuntimeAllocatorInfo& other) const { + // To make OrtAllocatorInfo become a valid key in std map + inline bool operator<(const OrtAllocatorInfo& other) const { if (type != other.type) return type < other.type; if (mem_type != other.mem_type) @@ -50,7 +50,7 @@ struct ONNXRuntimeAllocatorInfo { inline std::string ToString() const { std::ostringstream ostr; - ostr << "ONNXRuntimeAllocatorInfo: [" + ostr << "OrtAllocatorInfo: [" << " name:" << name << " id:" << id << " mem_type:" << mem_type @@ -60,7 +60,7 @@ struct ONNXRuntimeAllocatorInfo { } }; -std::ostream& operator<<(std::ostream& out, const ONNXRuntimeAllocatorInfo& info); +std::ostream& operator<<(std::ostream& out, const OrtAllocatorInfo& info); namespace onnxruntime { constexpr const char* CPU = "Cpu"; @@ -76,7 +76,7 @@ class IAllocator { virtual ~IAllocator() = default; virtual void* Alloc(size_t size) = 0; virtual void Free(void* p) = 0; - virtual const ONNXRuntimeAllocatorInfo& Info() const = 0; + virtual const OrtAllocatorInfo& Info() const = 0; /** optional CreateFence interface, as provider like DML has its own fence @@ -96,7 +96,7 @@ class IAllocator { * \return true, successful. 
false, overflow */ template - static bool CalcMemSizeForArrayWithAlignment(size_t nmemb, size_t size, size_t* out) noexcept ONNX_RUNTIME_MUST_USE_RESULT; + static bool CalcMemSizeForArrayWithAlignment(size_t nmemb, size_t size, size_t* out) noexcept ORT_MUST_USE_RESULT; /** * allocate memory for an array which has nmemb items of data, each size bytes long */ @@ -175,7 +175,7 @@ class IDeviceAllocator : public IAllocator { ~IDeviceAllocator() override = default; void* Alloc(size_t size) override = 0; void Free(void* p) override = 0; - const ONNXRuntimeAllocatorInfo& Info() const override = 0; + const OrtAllocatorInfo& Info() const override = 0; virtual bool AllowsArena() const { return true; } }; @@ -183,7 +183,7 @@ class CPUAllocator : public IDeviceAllocator { public: void* Alloc(size_t size) override; void Free(void* p) override; - const ONNXRuntimeAllocatorInfo& Info() const override; + const OrtAllocatorInfo& Info() const override; }; using AllocatorPtr = std::shared_ptr; diff --git a/include/onnxruntime/core/framework/customregistry.h b/include/onnxruntime/core/framework/customregistry.h index 48ee0a1316dab..ffa7212aef3ad 100644 --- a/include/onnxruntime/core/framework/customregistry.h +++ b/include/onnxruntime/core/framework/customregistry.h @@ -32,7 +32,7 @@ class CustomRegistry : public KernelRegistry, public onnxruntime::OnnxRuntimeOpS common::Status RegisterCustomKernel(KernelCreateInfo&); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CustomRegistry); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CustomRegistry); }; } // namespace onnxruntime diff --git a/include/onnxruntime/core/framework/data_types.h b/include/onnxruntime/core/framework/data_types.h index baf435106f6d2..023bc2b362413 100644 --- a/include/onnxruntime/core/framework/data_types.h +++ b/include/onnxruntime/core/framework/data_types.h @@ -202,7 +202,7 @@ struct SetMapTypes { TensorContainedTypeSetter::SetMapKeyType(proto); MLDataType dt = GetMLDataType::value>::Get(); const auto* value_proto = dt->GetTypeProto(); - ONNXRUNTIME_ENFORCE(value_proto != nullptr, typeid(V).name(), + ORT_ENFORCE(value_proto != nullptr, typeid(V).name(), " expected to be a registered ONNX type"); CopyMutableMapValue(*value_proto, proto); } @@ -219,7 +219,7 @@ struct SetSequenceType { static void Set(ONNX_NAMESPACE::TypeProto& proto) { MLDataType dt = GetMLDataType::value>::Get(); const auto* elem_proto = dt->GetTypeProto(); - ONNXRUNTIME_ENFORCE(elem_proto != nullptr, typeid(T).name(), + ORT_ENFORCE(elem_proto != nullptr, typeid(T).name(), " expected to be a registered ONNX type"); CopyMutableSeqElement(*elem_proto, proto); } @@ -258,7 +258,7 @@ class TensorTypeBase : public DataTypeImpl { virtual MLDataType GetElementType() const { // should never reach here. - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); } TensorTypeBase(const TensorTypeBase&) = delete; @@ -282,7 +282,7 @@ class TensorTypeBase : public DataTypeImpl { * * \details * Usage: - * ONNXRUNTIME_REGISTER_TENSOR(ELEMENT_TYPE) + * ORT_REGISTER_TENSOR(ELEMENT_TYPE) * Currently all of the Tensors irrespective of the dimensions are mapped to Tensor * type. IsCompatible() currently ignores shape. 
*/ @@ -372,7 +372,7 @@ class NonTensorType : public NonTensorTypeBase { * * \param T - cpp type that you wish to register as runtime MapType * - * \details Usage: ONNXRUNTIME_REGISTER_MAP(C++Type) + * \details Usage: ORT_REGISTER_MAP(C++Type) * The type is required to have mapped_type and * key_type defined */ @@ -401,7 +401,7 @@ class MapType : public NonTensorType { * \param T - CPP type that you wish to register as Sequence * runtime type. * - * \details Usage: ONNXRUNTIME_REGISTER_SEQ(C++Type) + * \details Usage: ORT_REGISTER_SEQ(C++Type) * The type is required to have value_type defined */ template @@ -482,7 +482,7 @@ class NonOnnxType : public DataTypeImpl { // thus a simple way to pre-instantiate a given template // at a registration time does not currently work and the macro // is needed. -#define ONNXRUNTIME_REGISTER_TENSOR_TYPE(ELEM_TYPE) \ +#define ORT_REGISTER_TENSOR_TYPE(ELEM_TYPE) \ template <> \ MLDataType TensorType::Type() { \ static TensorType tensor_type; \ @@ -493,7 +493,7 @@ class NonOnnxType : public DataTypeImpl { return TensorType::Type(); \ } -#define ONNXRUNTIME_REGISTER_MAP(TYPE) \ +#define ORT_REGISTER_MAP(TYPE) \ template <> \ MLDataType MapType::Type() { \ static MapType map_type; \ @@ -504,7 +504,7 @@ class NonOnnxType : public DataTypeImpl { return MapType::Type(); \ } -#define ONNXRUNTIME_REGISTER_SEQ(TYPE) \ +#define ORT_REGISTER_SEQ(TYPE) \ template <> \ MLDataType SequenceType::Type() { \ static SequenceType sequence_type; \ @@ -515,7 +515,7 @@ class NonOnnxType : public DataTypeImpl { return SequenceType::Type(); \ } -#define ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(TYPE) \ +#define ORT_REGISTER_NON_ONNX_TYPE(TYPE) \ template <> \ MLDataType NonOnnxType::Type() { \ static NonOnnxType non_onnx_type; \ @@ -526,7 +526,7 @@ class NonOnnxType : public DataTypeImpl { return NonOnnxType::Type(); \ } -#define ONNXRUNTIME_REGISTER_OPAQUE_TYPE(CPPType, Domain, Name) \ +#define ORT_REGISTER_OPAQUE_TYPE(CPPType, Domain, Name) \ template <> \ MLDataType OpaqueType::Type() { \ static OpaqueType opaque_type; \ diff --git a/include/onnxruntime/core/framework/environment.h b/include/onnxruntime/core/framework/environment.h index f9ef84d0de62c..f36ebb60d0011 100644 --- a/include/onnxruntime/core/framework/environment.h +++ b/include/onnxruntime/core/framework/environment.h @@ -31,7 +31,7 @@ class Environment { static bool IsInitialized() { return is_initialized_; } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Environment); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Environment); Environment() = default; Status Initialize(); diff --git a/include/onnxruntime/core/framework/execution_provider.h b/include/onnxruntime/core/framework/execution_provider.h index d065d9e6dd8be..0d6f3133aa884 100644 --- a/include/onnxruntime/core/framework/execution_provider.h +++ b/include/onnxruntime/core/framework/execution_provider.h @@ -40,7 +40,7 @@ class IExecutionProvider { /** Get allocator with specified MemType */ - virtual AllocatorPtr GetAllocator(int id, ONNXRuntimeMemType mem_type) const; + virtual AllocatorPtr GetAllocator(int id, OrtMemType mem_type) const; /** Get execution provider's capability for the specified . 
diff --git a/include/onnxruntime/core/framework/kernel_def_builder.h b/include/onnxruntime/core/framework/kernel_def_builder.h index a8d34f10376f9..dd30683266983 100644 --- a/include/onnxruntime/core/framework/kernel_def_builder.h +++ b/include/onnxruntime/core/framework/kernel_def_builder.h @@ -17,12 +17,12 @@ namespace onnxruntime { class KernelDefBuilder; -typedef std::map MemTypeMap; +typedef std::map MemTypeMap; // note that input/output might be on CPU implicitly when the node is from CPU execution provider inline bool MemTypeOnCpuExplicitly(const MemTypeMap& mem_type_map, size_t index) { auto iter = mem_type_map.find(index); - return iter != mem_type_map.end() && (iter->second == ONNXRuntimeMemTypeCPUInput || iter->second == ONNXRuntimeMemTypeCPUOutput); + return iter != mem_type_map.end() && (iter->second == OrtMemTypeCPUInput || iter->second == OrtMemTypeCPUOutput); } class KernelDef { @@ -188,7 +188,7 @@ class KernelDefBuilder { Specify that this kernel requires an input arg in certain memory type (instead of the default, device memory). */ - template + template KernelDefBuilder& InputMemoryType(int input_index) { kernel_def_->input_memory_type_args_.insert(std::make_pair(input_index, T)); return *this; @@ -198,7 +198,7 @@ class KernelDefBuilder { Specify that this kernel provides an output arg in certain memory type (instead of the default, device memory). */ - template + template KernelDefBuilder& OutputMemoryType(int output_index) { kernel_def_->output_memory_type_args_.insert(std::make_pair(output_index, T)); return *this; diff --git a/include/onnxruntime/core/framework/kernel_registry.h b/include/onnxruntime/core/framework/kernel_registry.h index ff70ef5dd38e5..6767df53ecb50 100644 --- a/include/onnxruntime/core/framework/kernel_registry.h +++ b/include/onnxruntime/core/framework/kernel_registry.h @@ -16,7 +16,7 @@ class KernelRegistry { std::string provider = info.kernel_def->Provider(); auto st = Register(info); if (!st.IsOK()) - ONNXRUNTIME_THROW("Register Kernel ", name, " in ", provider, " failed:", st.ErrorMessage()); + ORT_THROW("Register Kernel ", name, " in ", provider, " failed:", st.ErrorMessage()); }); } diff --git a/include/onnxruntime/core/framework/ml_value.h b/include/onnxruntime/core/framework/ml_value.h index 051dc1d297398..b5456863462bd 100644 --- a/include/onnxruntime/core/framework/ml_value.h +++ b/include/onnxruntime/core/framework/ml_value.h @@ -34,13 +34,13 @@ class MLValue { template const T& Get() const { - ONNXRUNTIME_ENFORCE(DataTypeImpl::GetType() == type_, DataTypeImpl::GetType(), " != ", type_); + ORT_ENFORCE(DataTypeImpl::GetType() == type_, DataTypeImpl::GetType(), " != ", type_); return *static_cast(data_.get()); } template T* GetMutable() { - ONNXRUNTIME_ENFORCE(DataTypeImpl::GetType() == type_, DataTypeImpl::GetType(), " != ", type_); + ORT_ENFORCE(DataTypeImpl::GetType() == type_, DataTypeImpl::GetType(), " != ", type_); return static_cast(data_.get()); } diff --git a/include/onnxruntime/core/framework/onnx_object_cxx.h b/include/onnxruntime/core/framework/onnx_object_cxx.h index a7699c17858b2..f47389ba0cd2c 100644 --- a/include/onnxruntime/core/framework/onnx_object_cxx.h +++ b/include/onnxruntime/core/framework/onnx_object_cxx.h @@ -19,19 +19,19 @@ class ObjectBase { static ONNXObject static_cls; protected: - const ONNXObject* const ONNXRUNTIME_ATTRIBUTE_UNUSED cls_; + const ONNXObject* const ORT_ATTRIBUTE_UNUSED cls_; std::atomic_int ref_count; ObjectBase() : cls_(&static_cls), ref_count(1) { } - static uint32_t ONNXRUNTIME_API_CALL 
ONNXRuntimeReleaseImpl(void* this_) { + static uint32_t ORT_API_CALL OrtReleaseImpl(void* this_) { T* this_ptr = reinterpret_cast(this_); if (--this_ptr->ref_count == 0) delete this_ptr; return 0; } - static uint32_t ONNXRUNTIME_API_CALL ONNXRuntimeAddRefImpl(void* this_) { + static uint32_t ORT_API_CALL OrtAddRefImpl(void* this_) { T* this_ptr = reinterpret_cast(this_); ++this_ptr->ref_count; return 0; @@ -39,9 +39,9 @@ class ObjectBase { }; template -ONNXObject ObjectBase::static_cls = {ObjectBase::ONNXRuntimeAddRefImpl, ObjectBase::ONNXRuntimeReleaseImpl}; +ONNXObject ObjectBase::static_cls = {ObjectBase::OrtAddRefImpl, ObjectBase::OrtReleaseImpl}; } // namespace onnxruntime -#define ONNXRUNTIME_CHECK_C_OBJECT_LAYOUT \ +#define ORT_CHECK_C_OBJECT_LAYOUT \ { assert((char*)&ref_count == (char*)this + sizeof(this)); } diff --git a/include/onnxruntime/core/framework/op_kernel.h b/include/onnxruntime/core/framework/op_kernel.h index f0bfe2fff160b..68b15b853d70b 100644 --- a/include/onnxruntime/core/framework/op_kernel.h +++ b/include/onnxruntime/core/framework/op_kernel.h @@ -43,17 +43,17 @@ class OpKernel { virtual Status ComputeAsync(OpKernelContext*, DoneCallback) const { - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); } - const ONNXRuntimeAllocatorInfo& Allocator(int id, ONNXRuntimeMemType mem_type) const { + const OrtAllocatorInfo& Allocator(int id, OrtMemType mem_type) const { return op_kernel_info_.GetAllocatorInfo(id, mem_type); } const OpKernelInfo& Info() const { return op_kernel_info_; } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OpKernel); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OpKernel); OpKernelInfo op_kernel_info_; }; @@ -90,7 +90,7 @@ class OpKernelContext { return nullptr; MLValue* p_ml_value = nullptr; - ONNXRUNTIME_ENFORCE(GetOrCreateOutputMLValue(index, p_ml_value).IsOK()); + ORT_ENFORCE(GetOrCreateOutputMLValue(index, p_ml_value).IsOK()); return p_ml_value ? 
p_ml_value->GetMutable() : nullptr; } @@ -116,7 +116,7 @@ class OpKernelContext { } /** - * return an allocator on device 0, with memtype of ONNXRuntimeMemTypeDefault + * return an allocator on device 0, with memtype of OrtMemTypeDefault * */ Status GetTempSpaceAllocator(AllocatorPtr* output) const; @@ -174,7 +174,7 @@ class OpKernelContext { template <> inline Tensor* OpKernelContext::Output(int index) { MLValue* p_ml_value = GetOutputMLValue(index); - ONNXRUNTIME_ENFORCE(p_ml_value, "Please fetch output tensor with specified shape."); + ORT_ENFORCE(p_ml_value, "Please fetch output tensor with specified shape."); return p_ml_value->GetMutable(); } diff --git a/include/onnxruntime/core/framework/op_kernel_info.h b/include/onnxruntime/core/framework/op_kernel_info.h index c0b263c33dbd7..cb936f0a0e629 100644 --- a/include/onnxruntime/core/framework/op_kernel_info.h +++ b/include/onnxruntime/core/framework/op_kernel_info.h @@ -29,7 +29,7 @@ class OpKernelInfo : public OpNodeProtoHelper { OpKernelInfo(const OpKernelInfo& other); - const ONNXRuntimeAllocatorInfo& GetAllocatorInfo(int device_id, ONNXRuntimeMemType mem_type) const; + const OrtAllocatorInfo& GetAllocatorInfo(int device_id, OrtMemType mem_type) const; const KernelDef& GetKernelDef() const; @@ -40,8 +40,8 @@ class OpKernelInfo : public OpNodeProtoHelper { bool TryGetConstantInput(int input_index, const Tensor** constant_input_value) const; private: - ONNXRUNTIME_DISALLOW_MOVE(OpKernelInfo); - ONNXRUNTIME_DISALLOW_ASSIGNMENT(OpKernelInfo); + ORT_DISALLOW_MOVE(OpKernelInfo); + ORT_DISALLOW_ASSIGNMENT(OpKernelInfo); const onnxruntime::Node& node_; const KernelDef& kernel_def_; diff --git a/include/onnxruntime/core/framework/op_node_proto_helper.h b/include/onnxruntime/core/framework/op_node_proto_helper.h index 03d0849cd6573..f585ccf9f0f33 100644 --- a/include/onnxruntime/core/framework/op_node_proto_helper.h +++ b/include/onnxruntime/core/framework/op_node_proto_helper.h @@ -8,14 +8,14 @@ #include "gsl/span" #ifdef __has_attribute -#define ONNXRUNTIME_HAVE_ATTRIBUTE(x) __has_attribute(x) +#define ORT_HAVE_ATTRIBUTE(x) __has_attribute(x) #else -#define ONNXRUNTIME_HAVE_ATTRIBUTE(x) 0 +#define ORT_HAVE_ATTRIBUTE(x) 0 #endif -#if ONNXRUNTIME_HAVE_ATTRIBUTE(nodiscard) +#if ORT_HAVE_ATTRIBUTE(nodiscard) #define MUST_USE_RESULT [[nodiscard]] -#elif defined(__clang__) && ONNXRUNTIME_HAVE_ATTRIBUTE(warn_unused_result) +#elif defined(__clang__) && ORT_HAVE_ATTRIBUTE(warn_unused_result) #define MUST_USE_RESULT __attribute__((warn_unused_result)) #else #define MUST_USE_RESULT @@ -110,7 +110,7 @@ class OpNodeProtoHelper { const ONNX_NAMESPACE::AttributeProto* GetAttribute(const std::string& name) const { const ONNX_NAMESPACE::AttributeProto* attr = TryGetAttribute(name); - ONNXRUNTIME_ENFORCE(attr != nullptr); + ORT_ENFORCE(attr != nullptr); return attr; } diff --git a/include/onnxruntime/core/framework/run_options.h b/include/onnxruntime/core/framework/run_options.h index eba526075eb7e..9c6670a3b0ff1 100644 --- a/include/onnxruntime/core/framework/run_options.h +++ b/include/onnxruntime/core/framework/run_options.h @@ -12,24 +12,24 @@ /** * Configuration information for a single Run. 
*/ -struct ONNXRuntimeRunOptions : public onnxruntime::ObjectBase { +struct OrtRunOptions : public onnxruntime::ObjectBase { unsigned run_log_verbosity_level = 0; ///< applies to a particular Run() invocation std::string run_tag; ///< to identify logs generated by a particular Run() invocation /// set to 'true' to terminate any currently executing Run() calls that are using this - /// ONNXRuntimeRunOptions instance. the individual calls will exit gracefully and return an error status. + /// OrtRunOptions instance. the individual calls will exit gracefully and return an error status. bool terminate = false; - ONNXRuntimeRunOptions() = default; - ~ONNXRuntimeRunOptions() = default; + OrtRunOptions() = default; + ~OrtRunOptions() = default; // disable copy, move and assignment. we don't want accidental copies, to ensure that the instance provided to // the Run() call never changes and the terminate mechanism will work. - ONNXRuntimeRunOptions(const ONNXRuntimeRunOptions&) = delete; - ONNXRuntimeRunOptions(ONNXRuntimeRunOptions&&) = delete; - ONNXRuntimeRunOptions& operator=(const ONNXRuntimeRunOptions&) = delete; - ONNXRuntimeRunOptions& operator=(ONNXRuntimeRunOptions&&) = delete; + OrtRunOptions(const OrtRunOptions&) = delete; + OrtRunOptions(OrtRunOptions&&) = delete; + OrtRunOptions& operator=(const OrtRunOptions&) = delete; + OrtRunOptions& operator=(OrtRunOptions&&) = delete; }; namespace onnxruntime { -using RunOptions = ONNXRuntimeRunOptions; +using RunOptions = OrtRunOptions; } diff --git a/include/onnxruntime/core/framework/tensor.h b/include/onnxruntime/core/framework/tensor.h index a548b2b540c15..f2ab6f263fad9 100644 --- a/include/onnxruntime/core/framework/tensor.h +++ b/include/onnxruntime/core/framework/tensor.h @@ -63,7 +63,7 @@ class Tensor final { Tensor(MLDataType p_type, const TensorShape& shape, BufferNakedPtr p_data, - const ONNXRuntimeAllocatorInfo& alloc, + const OrtAllocatorInfo& alloc, AllocatorPtr deleter = nullptr, int64_t offset = 0); @@ -95,7 +95,7 @@ class Tensor final { /** Returns the location of the tensor's memory */ - const ONNXRuntimeAllocatorInfo& Location() const { return alloc_info_; } + const OrtAllocatorInfo& Location() const { return alloc_info_; } /** May return nullptr if tensor size is zero @@ -103,8 +103,8 @@ class Tensor final { template T* MutableData() { // Type check - ONNXRUNTIME_ENFORCE(DataTypeImpl::GetType() == dtype_, "Tensor type mismatch. ", - DataTypeImpl::GetType(), "!=", dtype_); + ORT_ENFORCE(DataTypeImpl::GetType() == dtype_, "Tensor type mismatch. ", + DataTypeImpl::GetType(), "!=", dtype_); return reinterpret_cast(static_cast(p_data_) + byte_offset_); } @@ -114,8 +114,8 @@ class Tensor final { template gsl::span MutableDataAsSpan() { // Type check - ONNXRUNTIME_ENFORCE(DataTypeImpl::GetType() == dtype_, "Tensor type mismatch. ", - DataTypeImpl::GetType(), "!=", dtype_); + ORT_ENFORCE(DataTypeImpl::GetType() == dtype_, "Tensor type mismatch. ", + DataTypeImpl::GetType(), "!=", dtype_); T* data = reinterpret_cast(static_cast(p_data_) + byte_offset_); return gsl::make_span(data, shape_.Size()); } @@ -123,27 +123,27 @@ class Tensor final { template const T* Data() const { // Type check - ONNXRUNTIME_ENFORCE(DataTypeImpl::GetType() == dtype_, "Tensor type mismatch. ", - DataTypeImpl::GetType(), "!=", dtype_); + ORT_ENFORCE(DataTypeImpl::GetType() == dtype_, "Tensor type mismatch. 
", + DataTypeImpl::GetType(), "!=", dtype_); return reinterpret_cast(static_cast(p_data_) + byte_offset_); } template gsl::span DataAsSpan() const { // Type check - ONNXRUNTIME_ENFORCE(DataTypeImpl::GetType() == dtype_, "Tensor type mismatch. ", - DataTypeImpl::GetType(), "!=", dtype_); + ORT_ENFORCE(DataTypeImpl::GetType() == dtype_, "Tensor type mismatch. ", + DataTypeImpl::GetType(), "!=", dtype_); const T* data = reinterpret_cast(static_cast(p_data_) + byte_offset_); return gsl::make_span(data, shape_.Size()); } void* MutableDataRaw(MLDataType type) { - ONNXRUNTIME_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_); + ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_); return p_data_; } const void* DataRaw(MLDataType type) const { - ONNXRUNTIME_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_); + ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_); return p_data_; } @@ -161,9 +161,9 @@ class Tensor final { * @warning this function is NOT thread-safe. */ inline void Reshape(const TensorShape& new_shape) { - ONNXRUNTIME_ENFORCE(shape_.Size() == new_shape.Size(), - "Tensor size (" + std::to_string(shape_.Size()) + - ") != new size (" + std::to_string(new_shape.Size()) + ")"); + ORT_ENFORCE(shape_.Size() == new_shape.Size(), + "Tensor size (" + std::to_string(shape_.Size()) + + ") != new size (" + std::to_string(new_shape.Size()) + ")"); shape_ = new_shape; } @@ -176,7 +176,7 @@ class Tensor final { void Init(MLDataType p_type, const TensorShape& shape, void* p_raw_data, - const ONNXRuntimeAllocatorInfo& alloc, + const OrtAllocatorInfo& alloc, AllocatorPtr deleter, int64_t offset = 0); @@ -192,7 +192,7 @@ class Tensor final { TensorShape shape_; MLDataType dtype_; - ONNXRuntimeAllocatorInfo alloc_info_; + OrtAllocatorInfo alloc_info_; int64_t byte_offset_; }; #ifdef __GNUC__ diff --git a/include/onnxruntime/core/graph/graph.h b/include/onnxruntime/core/graph/graph.h index 8a4f1fd08bc7e..a61607c3b1760 100644 --- a/include/onnxruntime/core/graph/graph.h +++ b/include/onnxruntime/core/graph/graph.h @@ -120,7 +120,7 @@ class Node { auto arg = node_args[index]; if (!arg->Exists()) continue; - ONNXRUNTIME_RETURN_IF_ERROR(func(*arg, index)); + ORT_RETURN_IF_ERROR(func(*arg, index)); } return common::Status::OK(); } @@ -313,7 +313,7 @@ class Node { std::vector implicit_input_defs; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Definitions); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Definitions); }; /** @@ -340,11 +340,11 @@ class Node { std::set control_inputs; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Relationships); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Relationships); }; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Node); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Node); // NOTE: This friendship relationship should ONLY be used for calling methods of the Node class and not accessing // the data members directly, so that the Node can maintain its internal invariants. @@ -707,7 +707,7 @@ class Graph { when the Graph is resolved. 
*/ void AddOuterScopeNodeArg(const std::string& name) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(outer_scope_node_arg_names_.insert(name)); + ORT_IGNORE_RETURN_VALUE(outer_scope_node_arg_names_.insert(name)); } /** When programmatically constructing a Graph, explicitly set the order to use for graph inputs when the graph is @@ -742,7 +742,7 @@ class Graph { virtual ~Graph(); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Graph); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Graph); // This friendship relationship should only be used to call Graph::Graph and // Graph::LoadGraph All other access should be via the public API. @@ -807,7 +807,7 @@ class Graph { } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ResolveContext); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ResolveContext); }; // search this and up through any parent_graph_ instance for a NodeArg @@ -884,9 +884,9 @@ class Graph { Node* NodeAtIndexImpl(NodeIndex node_index) const { // if we are trying to access a node that doesn't exist there's (most // likely) either a logic issue or a graph consistency/correctness issue. - // use ONNXRUNTIME_ENFORCE to prove that or uncover scenarios where we actually + // use ORT_ENFORCE to prove that or uncover scenarios where we actually // expect attempts to retrieve a non-existent node. - ONNXRUNTIME_ENFORCE(node_index < nodes_.size(), "Validating no unexpected access using an invalid node_index."); + ORT_ENFORCE(node_index < nodes_.size(), "Validating no unexpected access using an invalid node_index."); return nodes_[node_index].get(); } diff --git a/include/onnxruntime/core/graph/graph_transformer.h b/include/onnxruntime/core/graph/graph_transformer.h index 78493f695b501..c9a9aa33b9ff2 100644 --- a/include/onnxruntime/core/graph/graph_transformer.h +++ b/include/onnxruntime/core/graph/graph_transformer.h @@ -39,7 +39,7 @@ class GraphTransformer { virtual common::Status Apply(Graph& graph, bool& modified) const = 0; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphTransformer); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphTransformer); const std::string name_; const std::string desc_; diff --git a/include/onnxruntime/core/graph/graph_viewer.h b/include/onnxruntime/core/graph/graph_viewer.h index b464a3a6aaa40..5cb65fd7a5d00 100644 --- a/include/onnxruntime/core/graph/graph_viewer.h +++ b/include/onnxruntime/core/graph/graph_viewer.h @@ -98,7 +98,7 @@ class GraphViewer { const NodeArg* GetNodeArg(const std::string& name) const; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphViewer); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphViewer); const Graph* graph_; diff --git a/include/onnxruntime/core/graph/node_arg.h b/include/onnxruntime/core/graph/node_arg.h index 8542c9e037368..06ff04cdac65f 100644 --- a/include/onnxruntime/core/graph/node_arg.h +++ b/include/onnxruntime/core/graph/node_arg.h @@ -75,7 +75,7 @@ class NodeArg { bool Exists() const noexcept; private: - ONNXRUNTIME_DISALLOW_COPY_AND_ASSIGNMENT(NodeArg); + ORT_DISALLOW_COPY_AND_ASSIGNMENT(NodeArg); friend class Graph; void SetType(ONNX_NAMESPACE::DataType p_type); diff --git a/include/onnxruntime/core/graph/rewrite_rule.h b/include/onnxruntime/core/graph/rewrite_rule.h index db979bdfd2821..ec3cd957618fa 100644 --- a/include/onnxruntime/core/graph/rewrite_rule.h +++ b/include/onnxruntime/core/graph/rewrite_rule.h @@ -47,7 +47,7 @@ class RewriteRule { } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RewriteRule); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RewriteRule); const std::string name_; const 
std::string desc_; diff --git a/include/onnxruntime/core/providers/cpu/cpu_provider_factory.h b/include/onnxruntime/core/providers/cpu/cpu_provider_factory.h index 766c440292897..cda1766ad3010 100644 --- a/include/onnxruntime/core/providers/cpu/cpu_provider_factory.h +++ b/include/onnxruntime/core/providers/cpu/cpu_provider_factory.h @@ -9,13 +9,13 @@ extern "C" { /** * \param use_arena zero: false. non-zero: true. - * \param out Call ONNXRuntimeReleaseObject() method when you no longer need to use it. + * \param out Call OrtReleaseObject() method when you no longer need to use it. */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateCpuExecutionProviderFactory, int use_arena, _Out_ ONNXRuntimeProviderFactoryInterface*** out) -ONNXRUNTIME_ALL_ARGS_NONNULL; +ORT_API_STATUS(OrtCreateCpuExecutionProviderFactory, int use_arena, _Out_ OrtProviderFactoryInterface*** out) +ORT_ALL_ARGS_NONNULL; -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateCpuAllocatorInfo, enum ONNXRuntimeAllocatorType type, enum ONNXRuntimeMemType mem_type1, _Out_ ONNXRuntimeAllocatorInfo** out) -ONNXRUNTIME_ALL_ARGS_NONNULL; +ORT_API_STATUS(OrtCreateCpuAllocatorInfo, enum OrtAllocatorType type, enum OrtMemType mem_type1, _Out_ OrtAllocatorInfo** out) +ORT_ALL_ARGS_NONNULL; #ifdef __cplusplus } diff --git a/include/onnxruntime/core/providers/cuda/cuda_provider_factory.h b/include/onnxruntime/core/providers/cuda/cuda_provider_factory.h index d9c80a5978959..39f0fbc776aff 100644 --- a/include/onnxruntime/core/providers/cuda/cuda_provider_factory.h +++ b/include/onnxruntime/core/providers/cuda/cuda_provider_factory.h @@ -8,9 +8,9 @@ extern "C" { #endif /** * \param device_id cuda device id, starts from zero. - * \param out Call ONNXRuntimeReleaseObject() method when you no longer need to use it. + * \param out Call OrtReleaseObject() method when you no longer need to use it. */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateCUDAExecutionProviderFactory, int device_id, _Out_ ONNXRuntimeProviderFactoryInterface*** out); +ORT_API_STATUS(OrtCreateCUDAExecutionProviderFactory, int device_id, _Out_ OrtProviderFactoryInterface*** out); #ifdef __cplusplus } diff --git a/include/onnxruntime/core/providers/mkldnn/mkldnn_provider_factory.h b/include/onnxruntime/core/providers/mkldnn/mkldnn_provider_factory.h index 21fe68cd03fc4..25603f121c75b 100644 --- a/include/onnxruntime/core/providers/mkldnn/mkldnn_provider_factory.h +++ b/include/onnxruntime/core/providers/mkldnn/mkldnn_provider_factory.h @@ -9,9 +9,9 @@ extern "C" { /** * \param use_arena zero: false. non-zero: true. - * \param out Call ONNXRuntimeReleaseObject() method when you no longer need to use it. + * \param out Call OrtReleaseObject() method when you no longer need to use it. 
*/ -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateMkldnnExecutionProviderFactory, int use_arena, _Out_ ONNXRuntimeProviderFactoryInterface*** out); +ORT_API_STATUS(OrtCreateMkldnnExecutionProviderFactory, int use_arena, _Out_ OrtProviderFactoryInterface*** out); #ifdef __cplusplus } diff --git a/include/onnxruntime/core/session/onnxruntime_c_api.h b/include/onnxruntime/core/session/onnxruntime_c_api.h index 2a4f525998571..4c9a8b56402f0 100644 --- a/include/onnxruntime/core/session/onnxruntime_c_api.h +++ b/include/onnxruntime/core/session/onnxruntime_c_api.h @@ -24,26 +24,26 @@ extern "C" { #define _Inout_ #define _Inout_opt_ #define _Frees_ptr_opt_ -#define ONNXRUNTIME_ALL_ARGS_NONNULL __attribute__((nonnull)) +#define ORT_ALL_ARGS_NONNULL __attribute__((nonnull)) #else #include -#define ONNXRUNTIME_ALL_ARGS_NONNULL +#define ORT_ALL_ARGS_NONNULL #endif #ifdef _WIN32 -// Define ONNX_RUNTIME_DLL_IMPORT if your program is dynamically linked to onnxruntime. +// Define ORT_DLL_IMPORT if your program is dynamically linked to Ort. // dllexport is not used, we use a .def file. -#ifdef ONNX_RUNTIME_DLL_IMPORT -#define ONNX_RUNTIME_EXPORT __declspec(dllimport) +#ifdef ORT_DLL_IMPORT +#define ORT_EXPORT __declspec(dllimport) #else -#define ONNX_RUNTIME_EXPORT +#define ORT_EXPORT #endif -#define ONNXRUNTIME_API_CALL _stdcall -#define ONNX_RUNTIME_MUST_USE_RESULT +#define ORT_API_CALL _stdcall +#define ORT_MUST_USE_RESULT #else -#define ONNX_RUNTIME_EXPORT -#define ONNXRUNTIME_API_CALL -#define ONNX_RUNTIME_MUST_USE_RESULT __attribute__((warn_unused_result)) +#define ORT_EXPORT +#define ORT_API_CALL +#define ORT_MUST_USE_RESULT __attribute__((warn_unused_result)) #endif //Any pointer marked with _In_ or _Out_, cannot be NULL. Caller should ensure that. @@ -58,38 +58,38 @@ extern "C" { #define NO_EXCEPTION #endif -typedef enum ONNXRuntimeErrorCode { - ONNXRUNTIME_OK = 0, - ONNXRUNTIME_FAIL = 1, - ONNXRUNTIME_INVALID_ARGUMENT = 2, - ONNXRUNTIME_NO_SUCHFILE = 3, - ONNXRUNTIME_NO_MODEL = 4, - ONNXRUNTIME_ENGINE_ERROR = 5, - ONNXRUNTIME_RUNTIME_EXCEPTION = 6, - ONNXRUNTIME_INVALID_PROTOBUF = 7, - ONNXRUNTIME_MODEL_LOADED = 8, - ONNXRUNTIME_NOT_IMPLEMENTED = 9, - ONNXRUNTIME_INVALID_GRAPH = 10, - ONNXRUNTIME_SHAPE_INFERENCE_NOT_REGISTERED = 11, - ONNXRUNTIME_REQUIREMENT_NOT_REGISTERED = 12 -} ONNXRuntimeErrorCode; +typedef enum OrtErrorCode { + ORT_OK = 0, + ORT_FAIL = 1, + ORT_INVALID_ARGUMENT = 2, + ORT_NO_SUCHFILE = 3, + ORT_NO_MODEL = 4, + ORT_ENGINE_ERROR = 5, + ORT_RUNTIME_EXCEPTION = 6, + ORT_INVALID_PROTOBUF = 7, + ORT_MODEL_LOADED = 8, + ORT_NOT_IMPLEMENTED = 9, + ORT_INVALID_GRAPH = 10, + ORT_SHAPE_INFERENCE_NOT_REGISTERED = 11, + ORT_REQUIREMENT_NOT_REGISTERED = 12 +} OrtErrorCode; // ONNXStatus is always returned as a pointer. nullptr indicates success typedef void ONNXStatus; // __VA_ARGS__ on Windows and Linux are different -#define ONNXRUNTIME_API(RETURN_TYPE, NAME, ...) \ - ONNX_RUNTIME_EXPORT RETURN_TYPE ONNXRUNTIME_API_CALL NAME(__VA_ARGS__) NO_EXCEPTION +#define ORT_API(RETURN_TYPE, NAME, ...) \ + ORT_EXPORT RETURN_TYPE ORT_API_CALL NAME(__VA_ARGS__) NO_EXCEPTION -#define ONNXRUNTIME_API_STATUS(NAME, ...) \ - ONNX_RUNTIME_EXPORT ONNXStatus* ONNXRUNTIME_API_CALL NAME(__VA_ARGS__) NO_EXCEPTION ONNX_RUNTIME_MUST_USE_RESULT +#define ORT_API_STATUS(NAME, ...) \ + ORT_EXPORT ONNXStatus* ORT_API_CALL NAME(__VA_ARGS__) NO_EXCEPTION ORT_MUST_USE_RESULT -// Used in *.cc files. 
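Under the rename, every ORT_API_STATUS entry point still returns an ONNXStatus* where nullptr means success, so callers check and free the status themselves. A small caller-side sketch using the CPU provider factory declared earlier in this patch and the status helpers declared just below; the include paths and the arena flag are illustrative assumptions, not part of the change:

#include <cstdio>
#include "core/session/onnxruntime_c_api.h"
#include "core/providers/cpu/cpu_provider_factory.h"

int main() {
  OrtProviderFactoryInterface** cpu_factory = nullptr;
  // nullptr means success; otherwise the status carries an OrtErrorCode and a message.
  ONNXStatus* status = OrtCreateCpuExecutionProviderFactory(/*use_arena=*/1, &cpu_factory);
  if (status != nullptr) {
    std::fprintf(stderr, "error %d: %s\n", OrtGetErrorCode(status), OrtGetErrorMessage(status));
    ReleaseONNXStatus(status);
    return 1;
  }
  // Normally the factory is handed to OrtSessionOptionsAppendExecutionProvider first.
  OrtReleaseObject(cpu_factory);  // factory objects are released with OrtReleaseObject()
  return 0;
}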
Almost as same as ONNXRUNTIME_API_STATUS, except without ONNX_RUNTIME_MUST_USE_RESULT -#define ONNXRUNTIME_API_STATUS_IMPL(NAME, ...) \ - ONNX_RUNTIME_EXPORT ONNXStatus* ONNXRUNTIME_API_CALL NAME(__VA_ARGS__) NO_EXCEPTION +// Used in *.cc files. Almost as same as ORT_API_STATUS, except without ORT_MUST_USE_RESULT +#define ORT_API_STATUS_IMPL(NAME, ...) \ + ORT_EXPORT ONNXStatus* ORT_API_CALL NAME(__VA_ARGS__) NO_EXCEPTION #define DEFINE_RUNTIME_CLASS2(NAME, TYPE) \ - ONNXRUNTIME_API(void, Release##NAME, _Frees_ptr_opt_ TYPE* input); + ORT_API(void, Release##NAME, _Frees_ptr_opt_ TYPE* input); #define DEFINE_RUNTIME_CLASS(X) \ struct X; \ @@ -98,7 +98,7 @@ typedef void ONNXStatus; // ONNXStatus* is pointer to something like this: // struct ONNXStatus { -// ONNXRuntimeErrorCode code; +// OrtErrorCode code; // char msg[]; // a null-terminated string, var length // } DEFINE_RUNTIME_CLASS2(ONNXStatus, void); @@ -106,26 +106,26 @@ DEFINE_RUNTIME_CLASS2(ONNXStatus, void); /** * \param msg A null-terminated string. Its content will be copied into the newly created ONNXStatus */ -ONNXRUNTIME_API(ONNXStatus*, CreateONNXStatus, ONNXRuntimeErrorCode code, _In_ const char* msg) -ONNXRUNTIME_ALL_ARGS_NONNULL; +ORT_API(ONNXStatus*, CreateONNXStatus, OrtErrorCode code, _In_ const char* msg) +ORT_ALL_ARGS_NONNULL; -ONNXRUNTIME_API(ONNXRuntimeErrorCode, ONNXRuntimeGetErrorCode, _In_ const ONNXStatus* status) -ONNXRUNTIME_ALL_ARGS_NONNULL; +ORT_API(OrtErrorCode, OrtGetErrorCode, _In_ const ONNXStatus* status) +ORT_ALL_ARGS_NONNULL; /** * \param status must not be NULL * \return The error message inside the `status`. Don't free the returned value. */ -ONNXRUNTIME_API(const char*, ONNXRuntimeGetErrorMessage, _In_ const ONNXStatus* status) -ONNXRUNTIME_ALL_ARGS_NONNULL; +ORT_API(const char*, OrtGetErrorMessage, _In_ const ONNXStatus* status) +ORT_ALL_ARGS_NONNULL; // // Tensor Type and Shapes // -struct ONNXRuntimeTensorTypeAndShapeInfo; +struct OrtTensorTypeAndShapeInfo; //copied from TensorProto::DataType -//Currently, ONNXRuntime doesn't support complex64, complex128, bfloat16 types -typedef enum OnnxRuntimeTensorElementDataType { +//Currently, Ort doesn't support complex64, complex128, bfloat16 types +typedef enum OrtTensorElementDataType { ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED = 0, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT = 1, // maps to c type float ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8 = 2, // maps to c type uint8_t @@ -143,42 +143,42 @@ typedef enum OnnxRuntimeTensorElementDataType { ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64 = 14, // complex with float32 real and imaginary components ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128 = 15, // complex with float64 real and imaginary components ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16 = 16, // Non-IEEE floating-point format based on IEEE754 single-precision -} OnnxRuntimeTensorElementDataType; +} OrtTensorElementDataType; //sync with onnx TypeProto oneof -typedef enum ONNXRuntimeType { - ONNXRUNTIME_TYPE_UNKNOWN, - ONNXRUNTIME_TYPE_TENSOR, - ONNXRUNTIME_TYPE_SEQUENCE, - ONNXRUNTIME_TYPE_MAP, - ONNXRUNTIME_TYPE_OPAQUE, - ONNXRUNTIME_TYPE_SPARSETENSOR, -} ONNXRuntimeType; +typedef enum OrtType { + ORT_TYPE_UNKNOWN, + ORT_TYPE_TENSOR, + ORT_TYPE_SEQUENCE, + ORT_TYPE_MAP, + ORT_TYPE_OPAQUE, + ORT_TYPE_SPARSETENSOR, +} OrtType; -struct ONNXRuntimeTypeInfo; +struct OrtTypeInfo; /** * Don't free the returned value */ -ONNXRUNTIME_API(const struct ONNXRuntimeTensorTypeAndShapeInfo*, ONNXRuntimeCastTypeInfoToTensorInfo, _In_ struct ONNXRuntimeTypeInfo*); +ORT_API(const struct 
OrtTensorTypeAndShapeInfo*, OrtCastTypeInfoToTensorInfo, _In_ struct OrtTypeInfo*); /** - * The retured value should be released by calling ONNXRuntimeReleaseObject + * The retured value should be released by calling OrtReleaseObject */ -ONNXRUNTIME_API(struct ONNXRuntimeTensorTypeAndShapeInfo*, ONNXRuntimeCreateTensorTypeAndShapeInfo); +ORT_API(struct OrtTensorTypeAndShapeInfo*, OrtCreateTensorTypeAndShapeInfo); -ONNXRUNTIME_API_STATUS(ONNXRuntimeSetTensorElementType, _In_ struct ONNXRuntimeTensorTypeAndShapeInfo*, enum OnnxRuntimeTensorElementDataType type); +ORT_API_STATUS(OrtSetTensorElementType, _In_ struct OrtTensorTypeAndShapeInfo*, enum OrtTensorElementDataType type); /** - * \param info Created from ONNXRuntimeCreateTensorTypeAndShapeInfo() function + * \param info Created from OrtCreateTensorTypeAndShapeInfo() function * \param dim_values An array with length of `dim_count`. Its elements can contain negative values. * \param dim_count length of dim_values */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeSetDims, struct ONNXRuntimeTensorTypeAndShapeInfo* info, _In_ const int64_t* dim_values, size_t dim_count); +ORT_API_STATUS(OrtSetDims, struct OrtTensorTypeAndShapeInfo* info, _In_ const int64_t* dim_values, size_t dim_count); -ONNXRUNTIME_API(enum OnnxRuntimeTensorElementDataType, ONNXRuntimeGetTensorElementType, _In_ const struct ONNXRuntimeTensorTypeAndShapeInfo*); -ONNXRUNTIME_API(size_t, ONNXRuntimeGetNumOfDimensions, _In_ const struct ONNXRuntimeTensorTypeAndShapeInfo* info); -ONNXRUNTIME_API(void, ONNXRuntimeGetDimensions, _In_ const struct ONNXRuntimeTensorTypeAndShapeInfo* info, _Out_ int64_t* dim_values, size_t dim_values_length); +ORT_API(enum OrtTensorElementDataType, OrtGetTensorElementType, _In_ const struct OrtTensorTypeAndShapeInfo*); +ORT_API(size_t, OrtGetNumOfDimensions, _In_ const struct OrtTensorTypeAndShapeInfo* info); +ORT_API(void, OrtGetDimensions, _In_ const struct OrtTensorTypeAndShapeInfo* info, _Out_ int64_t* dim_values, size_t dim_values_length); /** * How many elements does this tensor have. @@ -191,55 +191,55 @@ ONNXRUNTIME_API(void, ONNXRuntimeGetDimensions, _In_ const struct ONNXRuntimeTen * return a negative value if unknown. (That this shape contains a symbolic variable which * represents an unknown dimension.) 
*/ -ONNXRUNTIME_API(int64_t, ONNXRuntimeGetTensorShapeElementCount, _In_ const struct ONNXRuntimeTensorTypeAndShapeInfo* info); +ORT_API(int64_t, OrtGetTensorShapeElementCount, _In_ const struct OrtTensorTypeAndShapeInfo* info); struct ONNXValue; /** - * \param out Should be freed by ONNXRuntimeReleaseObject after use + * \param out Should be freed by OrtReleaseObject after use */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeGetTensorShapeAndType, _In_ const struct ONNXValue* value, - _Out_ struct ONNXRuntimeTensorTypeAndShapeInfo** out); +ORT_API_STATUS(OrtGetTensorShapeAndType, _In_ const struct ONNXValue* value, + _Out_ struct OrtTensorTypeAndShapeInfo** out); /** * Get the type information of an ONNXValue * \param value - * \param out The returned value should be freed by ONNXRuntimeReleaseObject after use + * \param out The returned value should be freed by OrtReleaseObject after use */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeGetTypeInfo, _In_ const struct ONNXValue* value, struct ONNXRuntimeTypeInfo** out); +ORT_API_STATUS(OrtGetTypeInfo, _In_ const struct ONNXValue* value, struct OrtTypeInfo** out); -ONNXRUNTIME_API(enum ONNXRuntimeType, ONNXRuntimeGetValueType, _In_ const struct ONNXValue* value); +ORT_API(enum OrtType, OrtGetValueType, _In_ const struct ONNXValue* value); // -// RuntimeRunOptions +// OrtRunOptions // -struct ONNXRuntimeRunOptions; -typedef struct ONNXRuntimeRunOptions ONNXRuntimeRunOptions; +struct OrtRunOptions; +typedef struct OrtRunOptions OrtRunOptions; /** - * \return A pointer of the newly created object. The pointer should be freed by ONNXRuntimeReleaseObject after use + * \return A pointer of the newly created object. The pointer should be freed by OrtReleaseObject after use */ -ONNXRUNTIME_API(ONNXRuntimeRunOptions*, ONNXRuntimeCreateRunOptions); +ORT_API(OrtRunOptions*, OrtCreateRunOptions); -ONNXRUNTIME_API_STATUS(ONNXRuntimeRunOptionsSetRunLogVerbosityLevel, _In_ ONNXRuntimeRunOptions*, unsigned int); -ONNXRUNTIME_API_STATUS(ONNXRuntimeRunOptionsSetRunTag, _In_ ONNXRuntimeRunOptions*, _In_ const char* run_tag); +ORT_API_STATUS(OrtRunOptionsSetRunLogVerbosityLevel, _In_ OrtRunOptions*, unsigned int); +ORT_API_STATUS(OrtRunOptionsSetRunTag, _In_ OrtRunOptions*, _In_ const char* run_tag); -ONNXRUNTIME_API(unsigned int, ONNXRuntimeRunOptionsGetRunLogVerbosityLevel, _In_ ONNXRuntimeRunOptions*); -ONNXRUNTIME_API(const char*, ONNXRuntimeRunOptionsGetRunTag, _In_ ONNXRuntimeRunOptions*); +ORT_API(unsigned int, OrtRunOptionsGetRunLogVerbosityLevel, _In_ OrtRunOptions*); +ORT_API(const char*, OrtRunOptionsGetRunTag, _In_ OrtRunOptions*); -// set a flag so that any running ONNXRuntimeRunInference* calls that are using this instance of ONNXRuntimeRunOptions +// set a flag so that any running OrtRunInference* calls that are using this instance of ORtRunOptions // will exit as soon as possible if the flag is true. -ONNXRUNTIME_API(void, ONNXRuntimeRunOptionsSetTerminate, _In_ ONNXRuntimeRunOptions*, _In_ bool value); +ORT_API(void, OrtRunOptionsSetTerminate, _In_ OrtRunOptions*, _In_ bool value); -DEFINE_RUNTIME_CLASS(ONNXRuntimeProvider); +DEFINE_RUNTIME_CLASS(OrtProvider); /** * Just like the IUnknown interface in COM - * Every type inherented from ONNXObject should be deleted by ONNXRuntimeReleaseObject(...). + * Every type inherented from ONNXObject should be deleted by OrtReleaseObject(...). */ typedef struct ONNXObject { ///returns the new reference count. 
- uint32_t(ONNXRUNTIME_API_CALL* AddRef)(void* this_); + uint32_t(ORT_API_CALL* AddRef)(void* this_); ///returns the new reference count. - uint32_t(ONNXRUNTIME_API_CALL* Release)(void* this_); + uint32_t(ORT_API_CALL* Release)(void* this_); //TODO: implement QueryInterface? } ONNXObject; @@ -249,7 +249,7 @@ typedef struct ONNXObject { * Before calling this function, caller should make sure current ref count > 0 * \return the new reference count */ -ONNXRUNTIME_API(uint32_t, ONNXRuntimeAddRefToObject, _In_ void* ptr); +ORT_API(uint32_t, OrtAddRefToObject, _In_ void* ptr); /** * @@ -258,160 +258,159 @@ ONNXRUNTIME_API(uint32_t, ONNXRuntimeAddRefToObject, _In_ void* ptr); * \param ptr Can be NULL. If it's NULL, this function will return zero. * \return the new reference count. */ -ONNXRUNTIME_API(uint32_t, ONNXRuntimeReleaseObject, _Inout_opt_ void* ptr); +ORT_API(uint32_t, OrtReleaseObject, _Inout_opt_ void* ptr); //Inherented from ONNXObject -typedef struct ONNXRuntimeProviderFactoryInterface { +typedef struct OrtProviderFactoryInterface { ONNXObject parent; - ONNXStatus*(ONNXRUNTIME_API_CALL* CreateProvider)(void* this_, ONNXRuntimeProvider** out); -} ONNXRuntimeProviderFactoryInterface; + ONNXStatus*(ORT_API_CALL* CreateProvider)(void* this_, OrtProvider** out); +} OrtProviderFactoryInterface; -struct ONNXRuntimeSessionOptions; -typedef struct ONNXRuntimeSessionOptions ONNXRuntimeSessionOptions; +struct OrtSessionOptions; +typedef struct OrtSessionOptions OrtSessionOptions; /** - * \return A pointer of the newly created object. The pointer should be freed by ONNXRuntimeReleaseObject after use + * \return A pointer of the newly created object. The pointer should be freed by OrtReleaseObject after use */ -ONNXRUNTIME_API(ONNXRuntimeSessionOptions*, ONNXRuntimeCreateSessionOptions, void); +ORT_API(OrtSessionOptions*, OrtCreateSessionOptions, void); -/// create a copy of an existing ONNXRuntimeSessionOptions -ONNXRUNTIME_API(ONNXRuntimeSessionOptions*, ONNXRuntimeCloneSessionOptions, ONNXRuntimeSessionOptions*); -ONNXRUNTIME_API(void, ONNXRuntimeEnableSequentialExecution, _In_ ONNXRuntimeSessionOptions* options); -ONNXRUNTIME_API(void, ONNXRuntimeDisableSequentialExecution, _In_ ONNXRuntimeSessionOptions* options); +/// create a copy of an existing OrtSessionOptions +ORT_API(OrtSessionOptions*, OrtCloneSessionOptions, OrtSessionOptions*); +ORT_API(void, OrtEnableSequentialExecution, _In_ OrtSessionOptions* options); +ORT_API(void, OrtDisableSequentialExecution, _In_ OrtSessionOptions* options); // enable profiling for this session. -ONNXRUNTIME_API(void, ONNXRuntimeEnableProfiling, _In_ ONNXRuntimeSessionOptions* options, _In_ const char* profile_file_prefix); -ONNXRUNTIME_API(void, ONNXRuntimeDisableProfiling, _In_ ONNXRuntimeSessionOptions* options); +ORT_API(void, OrtEnableProfiling, _In_ OrtSessionOptions* options, _In_ const char* profile_file_prefix); +ORT_API(void, OrtDisableProfiling, _In_ OrtSessionOptions* options); // enable the memory pattern optimization. // The idea is if the input shapes are the same, we could trace the internal memory allocation // and generate a memory pattern for future request. So next time we could just do one allocation // with a big chunk for all the internal memory allocation. 
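Read together, the option setters above and the memory settings that follow configure an OrtSessionOptions before a session is created. A hedged sketch of that flow; the log id, profile prefix and thread count are placeholder values:

OrtSessionOptions* MakeDemoOptions() {
  OrtSessionOptions* opts = OrtCreateSessionOptions();
  OrtEnableSequentialExecution(opts);
  OrtEnableProfiling(opts, "run_profile");     // profiling output uses this file prefix
  OrtEnableMemPattern(opts);                   // reuse traced allocations when input shapes repeat
  OrtEnableCpuMemArena(opts);
  OrtSetSessionLogId(opts, "sample-session");
  OrtSetSessionLogVerbosityLevel(opts, 0);
  OrtSetSessionThreadPoolSize(opts, 4);
  return opts;                                 // release later with OrtReleaseObject(opts)
}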
-ONNXRUNTIME_API(void, ONNXRuntimeEnableMemPattern, _In_ ONNXRuntimeSessionOptions* options); -ONNXRUNTIME_API(void, ONNXRuntimeDisableMemPattern, _In_ ONNXRuntimeSessionOptions* options); +ORT_API(void, OrtEnableMemPattern, _In_ OrtSessionOptions* options); +ORT_API(void, OrtDisableMemPattern, _In_ OrtSessionOptions* options); // enable the memory arena on CPU // Arena may pre-allocate memory for future usage. // set this option to false if you don't want it. -ONNXRUNTIME_API(void, ONNXRuntimeEnableCpuMemArena, _In_ ONNXRuntimeSessionOptions* options); -ONNXRUNTIME_API(void, ONNXRuntimeDisableCpuMemArena, _In_ ONNXRuntimeSessionOptions* options); +ORT_API(void, OrtEnableCpuMemArena, _In_ OrtSessionOptions* options); +ORT_API(void, OrtDisableCpuMemArena, _In_ OrtSessionOptions* options); ///< logger id to use for session output -ONNXRUNTIME_API(void, ONNXRuntimeSetSessionLogId, _In_ ONNXRuntimeSessionOptions* options, const char* logid); +ORT_API(void, OrtSetSessionLogId, _In_ OrtSessionOptions* options, const char* logid); ///< applies to session load, initialization, etc -ONNXRUNTIME_API(void, ONNXRuntimeSetSessionLogVerbosityLevel, _In_ ONNXRuntimeSessionOptions* options, uint32_t session_log_verbosity_level); +ORT_API(void, OrtSetSessionLogVerbosityLevel, _In_ OrtSessionOptions* options, uint32_t session_log_verbosity_level); ///How many threads in the session thread pool. -ONNXRUNTIME_API(int, ONNXRuntimeSetSessionThreadPoolSize, _In_ ONNXRuntimeSessionOptions* options, int session_thread_pool_size); +ORT_API(int, OrtSetSessionThreadPoolSize, _In_ OrtSessionOptions* options, int session_thread_pool_size); /** * The order of invocation indicates the preference order as well. In other words call this method * on your most preferred execution provider first followed by the less preferred ones. - * Calling this API is optional in which case onnxruntime will use its internal CPU execution provider. + * Calling this API is optional in which case Ort will use its internal CPU execution provider. */ -ONNXRUNTIME_API(void, ONNXRuntimeSessionOptionsAppendExecutionProvider, _In_ ONNXRuntimeSessionOptions* options, _In_ ONNXRuntimeProviderFactoryInterface** f); +ORT_API(void, OrtSessionOptionsAppendExecutionProvider, _In_ OrtSessionOptions* options, _In_ OrtProviderFactoryInterface** f); -ONNXRUNTIME_API(void, ONNXRuntimeAddCustomOp, _In_ ONNXRuntimeSessionOptions* options, const char* custom_op_path); +ORT_API(void, OrtAddCustomOp, _In_ OrtSessionOptions* options, const char* custom_op_path); -typedef enum ONNXRuntimeAllocatorType { - ONNXRuntimeDeviceAllocator = 0, - ONNXRuntimeArenaAllocator = 1 -} ONNXRuntimeAllocatorType; +typedef enum OrtAllocatorType { + OrtDeviceAllocator = 0, + OrtArenaAllocator = 1 +} OrtAllocatorType; /** memory types for allocator, exec provider specific types should be extended in each provider */ -typedef enum ONNXRuntimeMemType { - ONNXRuntimeMemTypeCPUInput = -2, // Any CPU memory used by non-CPU execution provider - ONNXRuntimeMemTypeCPUOutput = -1, // CPU accessible memory outputted by non-CPU execution provider, i.e. CUDA_PINNED - ONNXRuntimeMemTypeCPU = ONNXRuntimeMemTypeCPUOutput, // temporary CPU accessible memory allocated by non-CPU execution provider, i.e. 
CUDA_PINNED - ONNXRuntimeMemTypeDefault = 0, // the default allocator for execution provider -} ONNXRuntimeMemType; +typedef enum OrtMemType { + OrtMemTypeCPUInput = -2, // Any CPU memory used by non-CPU execution provider + OrtMemTypeCPUOutput = -1, // CPU accessible memory outputted by non-CPU execution provider, i.e. CUDA_PINNED + OrtMemTypeCPU = OrtMemTypeCPUOutput, // temporary CPU accessible memory allocated by non-CPU execution provider, i.e. CUDA_PINNED + OrtMemTypeDefault = 0, // the default allocator for execution provider +} OrtMemType; -DEFINE_RUNTIME_CLASS(ONNXRuntimeAllocatorInfo); +DEFINE_RUNTIME_CLASS(OrtAllocatorInfo); -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateAllocatorInfo, _In_ const char* name1, enum ONNXRuntimeAllocatorType type, int id1, enum ONNXRuntimeMemType mem_type1, _Out_ ONNXRuntimeAllocatorInfo** out); +ORT_API_STATUS(OrtCreateAllocatorInfo, _In_ const char* name1, enum OrtAllocatorType type, int id1, enum OrtMemType mem_type1, _Out_ OrtAllocatorInfo** out); /** * Test if two allocation info are equal * \return 0, equal. zero, not equal */ -ONNXRUNTIME_API(int, ONNXRuntimeCompareAllocatorInfo, _In_ const ONNXRuntimeAllocatorInfo* info1, _In_ const ONNXRuntimeAllocatorInfo* info2) -ONNXRUNTIME_ALL_ARGS_NONNULL; +ORT_API(int, OrtCompareAllocatorInfo, _In_ const OrtAllocatorInfo* info1, _In_ const OrtAllocatorInfo* info2) +ORT_ALL_ARGS_NONNULL; /** * Do not free the returned value */ -ONNXRUNTIME_API(const char*, ONNXRuntimeAllocatorInfoGetName, _In_ ONNXRuntimeAllocatorInfo* ptr); -ONNXRUNTIME_API(int, ONNXRuntimeAllocatorInfoGetId, _In_ ONNXRuntimeAllocatorInfo* ptr); -ONNXRUNTIME_API(ONNXRuntimeMemType, ONNXRuntimeAllocatorInfoGetMemType, _In_ ONNXRuntimeAllocatorInfo* ptr); -ONNXRUNTIME_API(ONNXRuntimeAllocatorType, ONNXRuntimeAllocatorInfoGetType, _In_ ONNXRuntimeAllocatorInfo* ptr); +ORT_API(const char*, OrtAllocatorInfoGetName, _In_ OrtAllocatorInfo* ptr); +ORT_API(int, OrtAllocatorInfoGetId, _In_ OrtAllocatorInfo* ptr); +ORT_API(OrtMemType, OrtAllocatorInfoGetMemType, _In_ OrtAllocatorInfo* ptr); +ORT_API(OrtAllocatorType, OrtAllocatorInfoGetType, _In_ OrtAllocatorInfo* ptr); //inherented from ONNXObject -typedef struct ONNXRuntimeAllocatorInteface { +typedef struct OrtAllocatorInterface { struct ONNXObject parent; - void*(ONNXRUNTIME_API_CALL* Alloc)(void* this_, size_t size); - void(ONNXRUNTIME_API_CALL* Free)(void* this_, void* p); - const struct ONNXRuntimeAllocatorInfo*(ONNXRUNTIME_API_CALL* Info)(const void* this_); -} ONNXRuntimeAllocatorInteface; - -typedef ONNXRuntimeAllocatorInteface* ONNXRuntimeAllocator; - -ONNXRUNTIME_API(void*, ONNXRuntimeAllocatorAlloc, _Inout_ ONNXRuntimeAllocator* ptr, size_t size); -ONNXRUNTIME_API(void, ONNXRuntimeAllocatorFree, _Inout_ ONNXRuntimeAllocator* ptr, void* p); -ONNXRUNTIME_API(const struct ONNXRuntimeAllocatorInfo*, ONNXRuntimeAllocatorGetInfo, _In_ const ONNXRuntimeAllocator* ptr); - -struct ONNXRuntimeEnv; -typedef struct ONNXRuntimeEnv ONNXRuntimeEnv; - -typedef enum ONNXRuntimeLoggingLevel { - ONNXRUNTIME_LOGGING_LEVEL_kVERBOSE = 0, - ONNXRUNTIME_LOGGING_LEVEL_kINFO = 1, - ONNXRUNTIME_LOGGING_LEVEL_kWARNING = 2, - ONNXRUNTIME_LOGGING_LEVEL_kERROR = 3, - ONNXRUNTIME_LOGGING_LEVEL_kFATAL = 4 -} ONNXRuntimeLoggingLevel; - -typedef void(ONNXRUNTIME_API_CALL* ONNXRuntimeLoggingFunction)( - void* param, ONNXRuntimeLoggingLevel severity, const char* category, const char* logid, const char* code_location, + void*(ORT_API_CALL* Alloc)(void* this_, size_t size); + void(ORT_API_CALL* Free)(void* this_, void* p); + 
const struct OrtAllocatorInfo*(ORT_API_CALL* Info)(const void* this_); +} OrtAllocatorInterface; + +typedef OrtAllocatorInterface* OrtAllocator; + +ORT_API(void*, OrtAllocatorAlloc, _Inout_ OrtAllocator* ptr, size_t size); +ORT_API(void, OrtAllocatorFree, _Inout_ OrtAllocator* ptr, void* p); +ORT_API(const struct OrtAllocatorInfo*, OrtAllocatorGetInfo, _In_ const OrtAllocator* ptr); + +struct OrtEnv; +typedef struct OrtEnv OrtEnv; + +typedef enum OrtLoggingLevel { + ORT_LOGGING_LEVEL_kVERBOSE = 0, + ORT_LOGGING_LEVEL_kINFO = 1, + ORT_LOGGING_LEVEL_kWARNING = 2, + ORT_LOGGING_LEVEL_kERROR = 3, + ORT_LOGGING_LEVEL_kFATAL = 4 +} OrtLoggingLevel; + +typedef void(ORT_API_CALL* OrtLoggingFunction)( + void* param, OrtLoggingLevel severity, const char* category, const char* logid, const char* code_location, const char* message); /** - * ONNXRuntimeEnv is process-wise. For each process, only one ONNXRuntimeEnv can be created. Don't do it multiple times - * \param out Should be freed by `ONNXRuntimeReleaseObject` after use + * OrtEnv is process-wise. For each process, only one OrtEnv can be created. Don't do it multiple times + * \param out Should be freed by `OrtReleaseObject` after use */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeInitialize, ONNXRuntimeLoggingLevel default_warning_level, _In_ const char* logid, - _Out_ ONNXRuntimeEnv** out) -ONNXRUNTIME_ALL_ARGS_NONNULL; +ORT_API_STATUS(OrtInitialize, OrtLoggingLevel default_warning_level, _In_ const char* logid, _Out_ OrtEnv** out) +ORT_ALL_ARGS_NONNULL; /** - * ONNXRuntimeEnv is process-wise. For each process, only one ONNXRuntimeEnv can be created. Don't do it multiple times - * \param out Should be freed by `ONNXRuntimeReleaseObject` after use + * OrtEnv is process-wise. For each process, only one OrtEnv can be created. Don't do it multiple times + * \param out Should be freed by `OrtReleaseObject` after use */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeInitializeWithCustomLogger, ONNXRuntimeLoggingFunction logging_function, - _In_opt_ void* logger_param, ONNXRuntimeLoggingLevel default_warning_level, - _In_ const char* logid, - _Out_ ONNXRuntimeEnv** out); +ORT_API_STATUS(OrtInitializeWithCustomLogger, OrtLoggingFunction logging_function, + _In_opt_ void* logger_param, OrtLoggingLevel default_warning_level, + _In_ const char* logid, + _Out_ OrtEnv** out); DEFINE_RUNTIME_CLASS(ONNXSession); //TODO: document the path separator convention? '/' vs '\' //TODO: should specify the access characteristics of model_path. Is this read only during the -//execution of ONNXRuntimeCreateInferenceSession, or does the ONNXSession retain a handle to the file/directory +//execution of OrtCreateInferenceSession, or does the ONNXSession retain a handle to the file/directory //and continue to access throughout the ONNXSession lifetime? // What sort of access is needed to model_path : read or read/write? 
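A minimal bootstrap with the renamed environment API above, shown as a sketch rather than an official sample; the include path and the "demo" log id are assumptions:

#include <cstdio>
#include "core/session/onnxruntime_c_api.h"

OrtEnv* CreateDemoEnv() {
  OrtEnv* env = nullptr;
  // Only one OrtEnv may exist per process, as the comment above notes.
  ONNXStatus* status = OrtInitialize(ORT_LOGGING_LEVEL_kWARNING, "demo", &env);
  if (status != nullptr) {
    std::fprintf(stderr, "OrtInitialize failed: %s\n", OrtGetErrorMessage(status));
    ReleaseONNXStatus(status);
    return nullptr;
  }
  return env;  // release later with OrtReleaseObject(env)
}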
//TODO: allow loading from an in-memory byte-array #ifdef _WIN32 -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateInferenceSession, _In_ ONNXRuntimeEnv* env, _In_ const wchar_t* model_path, - _In_ const ONNXRuntimeSessionOptions* options, _Out_ ONNXSession** out); +ORT_API_STATUS(OrtCreateInferenceSession, _In_ OrtEnv* env, _In_ const wchar_t* model_path, + _In_ const OrtSessionOptions* options, _Out_ ONNXSession** out); #else -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateInferenceSession, _In_ ONNXRuntimeEnv* env, _In_ const char* model_path, - _In_ const ONNXRuntimeSessionOptions* options, _Out_ ONNXSession** out); +ORT_API_STATUS(OrtCreateInferenceSession, _In_ OrtEnv* env, _In_ const char* model_path, + _In_ const OrtSessionOptions* options, _Out_ ONNXSession** out); #endif DEFINE_RUNTIME_CLASS(ONNXValue); -///Call ONNXRuntimeReleaseObject to release the returned value -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateDefaultAllocator, _Out_ ONNXRuntimeAllocator** out); +///Call OrtReleaseObject to release the returned value +ORT_API_STATUS(OrtCreateDefaultAllocator, _Out_ OrtAllocator** out); /** * Create a tensor from an allocator. ReleaseONNXValue will also release the buffer inside the output value @@ -419,81 +418,81 @@ ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateDefaultAllocator, _Out_ ONNXRuntimeAlloc * calling ReleaseONNXValue * \param type must be one of TENSOR_ELEMENT_DATA_TYPE_xxxx */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateTensorAsONNXValue, _Inout_ ONNXRuntimeAllocator* allocator, - _In_ const size_t* shape, size_t shape_len, OnnxRuntimeTensorElementDataType type, - _Out_ ONNXValue** out); +ORT_API_STATUS(OrtCreateTensorAsONNXValue, _Inout_ OrtAllocator* allocator, + _In_ const size_t* shape, size_t shape_len, OrtTensorElementDataType type, + _Out_ ONNXValue** out); /** * Create a tensor with user's buffer. You can fill the buffer either before calling this function or after. * p_data is owned by caller. ReleaseONNXValue won't release p_data. * \param out Should be freed by calling ReleaseONNXValue */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeCreateTensorWithDataAsONNXValue, _In_ const ONNXRuntimeAllocatorInfo* info, - _In_ void* p_data, size_t p_data_len, _In_ const size_t* shape, size_t shape_len, - OnnxRuntimeTensorElementDataType type, _Out_ ONNXValue** out); +ORT_API_STATUS(OrtCreateTensorWithDataAsONNXValue, _In_ const OrtAllocatorInfo* info, + _In_ void* p_data, size_t p_data_len, _In_ const size_t* shape, size_t shape_len, + OrtTensorElementDataType type, _Out_ ONNXValue** out); /// This function doesn't work with string tensor /// this is a no-copy method whose pointer is only valid until the backing ONNXValue is free'd. -ONNXRUNTIME_API_STATUS(ONNXRuntimeGetTensorMutableData, _Inout_ ONNXValue* value, _Out_ void** out); +ORT_API_STATUS(OrtGetTensorMutableData, _Inout_ ONNXValue* value, _Out_ void** out); /** * Test if an ONNXValue is a tensor * \return zero, false. non-zero true */ -ONNXRUNTIME_API(int, ONNXRuntimeIsTensor, _In_ const ONNXValue* value); +ORT_API(int, OrtIsTensor, _In_ const ONNXValue* value); /** - * \param value A tensor created from ONNXRuntimeCreateTensor*** function. + * \param value A tensor created from OrtCreateTensor*** function. * \param s each A string array. Each string in this array must be null terminated. 
* \param s_len length of s */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeFillStringTensor, _In_ ONNXValue* value, _In_ const char* const* s, size_t s_len); +ORT_API_STATUS(OrtFillStringTensor, _In_ ONNXValue* value, _In_ const char* const* s, size_t s_len); /** - * \param value A tensor created from ONNXRuntimeCreateTensor*** function. + * \param value A tensor created from OrtCreateTensor*** function. * \param len total data length, not including the trailing '\0' chars. */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeGetStringTensorDataLength, _In_ const ONNXValue* value, _Out_ size_t* len); +ORT_API_STATUS(OrtGetStringTensorDataLength, _In_ const ONNXValue* value, _Out_ size_t* len); /** * \param s string contents. Each string is NOT null-terminated. - * \param value A tensor created from ONNXRuntimeCreateTensor*** function. - * \param s_len total data length, get it from ONNXRuntimeGetStringTensorDataLength + * \param value A tensor created from OrtCreateTensor*** function. + * \param s_len total data length, get it from OrtGetStringTensorDataLength */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeGetStringTensorContent, _In_ const ONNXValue* value, _Out_ void* s, size_t s_len, - _Out_ size_t* offsets, size_t offsets_len); +ORT_API_STATUS(OrtGetStringTensorContent, _In_ const ONNXValue* value, _Out_ void* s, size_t s_len, + _Out_ size_t* offsets, size_t offsets_len); DEFINE_RUNTIME_CLASS(ONNXValueList); -ONNXRUNTIME_API_STATUS(ONNXRuntimeRunInference, _Inout_ ONNXSession* sess, - _In_ ONNXRuntimeRunOptions* run_options, - _In_ const char* const* input_names, _In_ const ONNXValue* const* input, size_t input_len, - _In_ const char* const* output_names, size_t output_names_len, _Out_ ONNXValue** output); +ORT_API_STATUS(OrtRunInference, _Inout_ ONNXSession* sess, + _In_ OrtRunOptions* run_options, + _In_ const char* const* input_names, _In_ const ONNXValue* const* input, size_t input_len, + _In_ const char* const* output_names, size_t output_names_len, _Out_ ONNXValue** output); -ONNXRUNTIME_API_STATUS(ONNXRuntimeInferenceSessionGetInputCount, _In_ const ONNXSession* sess, _Out_ size_t* out); -ONNXRUNTIME_API_STATUS(ONNXRuntimeInferenceSessionGetOutputCount, _In_ const ONNXSession* sess, _Out_ size_t* out); +ORT_API_STATUS(OrtInferenceSessionGetInputCount, _In_ const ONNXSession* sess, _Out_ size_t* out); +ORT_API_STATUS(OrtInferenceSessionGetOutputCount, _In_ const ONNXSession* sess, _Out_ size_t* out); /** - * \param out should be freed by ONNXRuntimeReleaseObject after use + * \param out should be freed by OrtReleaseObject after use */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeInferenceSessionGetInputTypeInfo, _In_ const ONNXSession* sess, size_t index, _Out_ struct ONNXRuntimeTypeInfo** out); +ORT_API_STATUS(OrtInferenceSessionGetInputTypeInfo, _In_ const ONNXSession* sess, size_t index, _Out_ struct OrtTypeInfo** out); /** - * \param out should be freed by ONNXRuntimeReleaseObject after use + * \param out should be freed by OrtReleaseObject after use */ -ONNXRUNTIME_API_STATUS(ONNXRuntimeInferenceSessionGetOutputTypeInfo, _In_ const ONNXSession* sess, size_t index, _Out_ struct ONNXRuntimeTypeInfo** out); +ORT_API_STATUS(OrtInferenceSessionGetOutputTypeInfo, _In_ const ONNXSession* sess, size_t index, _Out_ struct OrtTypeInfo** out); -ONNXRUNTIME_API_STATUS(ONNXRuntimeInferenceSessionGetInputName, _In_ const ONNXSession* sess, size_t index, - _Inout_ ONNXRuntimeAllocator* allocator, _Out_ char** value); -ONNXRUNTIME_API_STATUS(ONNXRuntimeInferenceSessionGetOutputName, _In_ const ONNXSession* sess, size_t index, - 
_Inout_ ONNXRuntimeAllocator* allocator, _Out_ char** value); +ORT_API_STATUS(OrtInferenceSessionGetInputName, _In_ const ONNXSession* sess, size_t index, + _Inout_ OrtAllocator* allocator, _Out_ char** value); +ORT_API_STATUS(OrtInferenceSessionGetOutputName, _In_ const ONNXSession* sess, size_t index, + _Inout_ OrtAllocator* allocator, _Out_ char** value); -ONNXRUNTIME_API_STATUS(ONNXRuntimeTensorProtoToONNXValue, _Inout_ ONNXRuntimeAllocator* allocator, - _In_ const void* input, int input_len, _Out_ ONNXValue** out); +ORT_API_STATUS(OrtTensorProtoToONNXValue, _Inout_ OrtAllocator* allocator, + _In_ const void* input, int input_len, _Out_ ONNXValue** out); /** - * Deprecated. Please use ONNXRuntimeReleaseObject + * Deprecated. Please use OrtReleaseObject */ -ONNXRUNTIME_API(void, ReleaseONNXEnv, ONNXRuntimeEnv* env); +ORT_API(void, ReleaseONNXEnv, OrtEnv* env); #ifdef __cplusplus } diff --git a/include/onnxruntime/core/session/onnxruntime_cxx_api.h b/include/onnxruntime/core/session/onnxruntime_cxx_api.h index f39c01295a536..d9503688ee125 100644 --- a/include/onnxruntime/core/session/onnxruntime_cxx_api.h +++ b/include/onnxruntime/core/session/onnxruntime_cxx_api.h @@ -9,26 +9,26 @@ #include //TODO: encode error code in the message? -#define ONNXRUNTIME_THROW_ON_ERROR(expr) \ - do { \ - ONNXStatus* onnx_status = (expr); \ - if (onnx_status != nullptr) { \ - std::string onnx_runtime_error_message = ONNXRuntimeGetErrorMessage(onnx_status); \ - ReleaseONNXStatus(onnx_status); \ - throw std::runtime_error(onnx_runtime_error_message); \ - } \ +#define ORT_THROW_ON_ERROR(expr) \ + do { \ + ONNXStatus* onnx_status = (expr); \ + if (onnx_status != nullptr) { \ + std::string ort_error_message = OrtGetErrorMessage(onnx_status); \ + ReleaseONNXStatus(onnx_status); \ + throw std::runtime_error(ort_error_message); \ + } \ } while (0); -#define ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL(NAME) \ - decltype(ONNXRuntime##NAME(value.get())) NAME() { \ - return ONNXRuntime##NAME(value.get()); \ +#define ORT_REDIRECT_SIMPLE_FUNCTION_CALL(NAME) \ + decltype(Ort##NAME(value.get())) NAME() { \ + return Ort##NAME(value.get()); \ } #define DECLARE_DEFAULT_DELETER_FOR_ONNX_OBJECT(TYPE_NAME) \ namespace std { \ template <> \ - struct default_delete { \ - void operator()(ONNXRuntime##TYPE_NAME* ptr) { \ + struct default_delete { \ + void operator()(Ort##TYPE_NAME* ptr) { \ (*reinterpret_cast(ptr))->Release(ptr); \ } \ }; \ @@ -47,32 +47,32 @@ DECLARE_DEFAULT_DELETER_FOR_ONNX_OBJECT(ProviderFactoryInterface*); namespace onnxruntime { class SessionOptionsWrapper { private: - std::unique_ptr value; - ONNXRuntimeEnv* env_; - SessionOptionsWrapper(_In_ ONNXRuntimeEnv* env, ONNXRuntimeSessionOptions* p) : value(p, ONNXRuntimeReleaseObject), env_(env){}; + std::unique_ptr value; + OrtEnv* env_; + SessionOptionsWrapper(_In_ OrtEnv* env, OrtSessionOptions* p) : value(p, OrtReleaseObject), env_(env){}; public: //TODO: for the input arg, should we call addref here? 
- SessionOptionsWrapper(_In_ ONNXRuntimeEnv* env) : value(ONNXRuntimeCreateSessionOptions(), ONNXRuntimeReleaseObject), env_(env){}; - ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL(EnableSequentialExecution) - ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL(DisableSequentialExecution) - ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL(DisableProfiling) - ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL(EnableMemPattern) - ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL(DisableMemPattern) - ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL(EnableCpuMemArena) - ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL(DisableCpuMemArena) + SessionOptionsWrapper(_In_ OrtEnv* env) : value(OrtCreateSessionOptions(), OrtReleaseObject), env_(env){}; + ORT_REDIRECT_SIMPLE_FUNCTION_CALL(EnableSequentialExecution) + ORT_REDIRECT_SIMPLE_FUNCTION_CALL(DisableSequentialExecution) + ORT_REDIRECT_SIMPLE_FUNCTION_CALL(DisableProfiling) + ORT_REDIRECT_SIMPLE_FUNCTION_CALL(EnableMemPattern) + ORT_REDIRECT_SIMPLE_FUNCTION_CALL(DisableMemPattern) + ORT_REDIRECT_SIMPLE_FUNCTION_CALL(EnableCpuMemArena) + ORT_REDIRECT_SIMPLE_FUNCTION_CALL(DisableCpuMemArena) void EnableProfiling(_In_ const char* profile_file_prefix) { - ONNXRuntimeEnableProfiling(value.get(), profile_file_prefix); + OrtEnableProfiling(value.get(), profile_file_prefix); } void SetSessionLogId(const char* logid) { - ONNXRuntimeSetSessionLogId(value.get(), logid); + OrtSetSessionLogId(value.get(), logid); } void SetSessionLogVerbosityLevel(uint32_t session_log_verbosity_level) { - ONNXRuntimeSetSessionLogVerbosityLevel(value.get(), session_log_verbosity_level); + OrtSetSessionLogVerbosityLevel(value.get(), session_log_verbosity_level); } void SetSessionThreadPoolSize(int session_thread_pool_size) { - ONNXRuntimeSetSessionThreadPoolSize(value.get(), session_thread_pool_size); + OrtSetSessionThreadPoolSize(value.get(), session_thread_pool_size); } /** @@ -80,49 +80,49 @@ class SessionOptionsWrapper { * on your most preferred execution provider first followed by the less preferred ones. * Calling this API is optional in which case onnxruntime will use its internal CPU execution provider. 
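// A usage sketch for this wrapper (it assumes an existing OrtEnv* `env` and a
// `cpu_factory` obtained from OrtCreateCpuExecutionProviderFactory; the model
// path is a placeholder):
//
//   onnxruntime::SessionOptionsWrapper so(env);
//   so.EnableSequentialExecution();
//   so.SetSessionLogId("demo");
//   so.AppendExecutionProvider(cpu_factory);
//   ONNXSession* session = so.OrtCreateInferenceSession("model.onnx");
//   // failures surface as std::runtime_error via ORT_THROW_ON_ERROR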
*/ - void AppendExecutionProvider(_In_ ONNXRuntimeProviderFactoryInterface** f) { - ONNXRuntimeSessionOptionsAppendExecutionProvider(value.get(), f); + void AppendExecutionProvider(_In_ OrtProviderFactoryInterface** f) { + OrtSessionOptionsAppendExecutionProvider(value.get(), f); } SessionOptionsWrapper clone() const { - ONNXRuntimeSessionOptions* p = ONNXRuntimeCloneSessionOptions(value.get()); + OrtSessionOptions* p = OrtCloneSessionOptions(value.get()); return SessionOptionsWrapper(env_, p); } #ifdef _WIN32 - ONNXSession* ONNXRuntimeCreateInferenceSession(_In_ const wchar_t* model_path) { + ONNXSession* OrtCreateInferenceSession(_In_ const wchar_t* model_path) { ONNXSession* ret; - ONNXRUNTIME_THROW_ON_ERROR(::ONNXRuntimeCreateInferenceSession(env_, model_path, value.get(), &ret)); + ORT_THROW_ON_ERROR(::OrtCreateInferenceSession(env_, model_path, value.get(), &ret)); return ret; } #else - ONNXSession* ONNXRuntimeCreateInferenceSession(_In_ const char* model_path) { + ONNXSession* OrtCreateInferenceSession(_In_ const char* model_path) { ONNXSession* ret; - ONNXRUNTIME_THROW_ON_ERROR(::ONNXRuntimeCreateInferenceSession(env_, model_path, value.get(), &ret)); + ORT_THROW_ON_ERROR(::OrtCreateInferenceSession(env_, model_path, value.get(), &ret)); return ret; } #endif void AddCustomOp(_In_ const char* custom_op_path) { - ONNXRuntimeAddCustomOp(value.get(), custom_op_path); + OrtAddCustomOp(value.get(), custom_op_path); } }; -inline ONNXValue* ONNXRuntimeCreateTensorAsONNXValue(_Inout_ ONNXRuntimeAllocator* env, const std::vector& shape, OnnxRuntimeTensorElementDataType type) { +inline ONNXValue* OrtCreateTensorAsONNXValue(_Inout_ OrtAllocator* env, const std::vector& shape, OrtTensorElementDataType type) { ONNXValue* ret; - ONNXRUNTIME_THROW_ON_ERROR(::ONNXRuntimeCreateTensorAsONNXValue(env, shape.data(), shape.size(), type, &ret)); + ORT_THROW_ON_ERROR(::OrtCreateTensorAsONNXValue(env, shape.data(), shape.size(), type, &ret)); return ret; } -inline ONNXValue* ONNXRuntimeCreateTensorWithDataAsONNXValue(_In_ const ONNXRuntimeAllocatorInfo* info, _In_ void* p_data, size_t p_data_len, const std::vector& shape, OnnxRuntimeTensorElementDataType type) { +inline ONNXValue* OrtCreateTensorWithDataAsONNXValue(_In_ const OrtAllocatorInfo* info, _In_ void* p_data, size_t p_data_len, const std::vector& shape, OrtTensorElementDataType type) { ONNXValue* ret; - ONNXRUNTIME_THROW_ON_ERROR(::ONNXRuntimeCreateTensorWithDataAsONNXValue(info, p_data, p_data_len, shape.data(), shape.size(), type, &ret)); + ORT_THROW_ON_ERROR(::OrtCreateTensorWithDataAsONNXValue(info, p_data, p_data_len, shape.data(), shape.size(), type, &ret)); return ret; } -inline std::vector GetTensorShape(const ONNXRuntimeTensorTypeAndShapeInfo* info) { - size_t dims = ONNXRuntimeGetNumOfDimensions(info); +inline std::vector GetTensorShape(const OrtTensorTypeAndShapeInfo* info) { + size_t dims = OrtGetNumOfDimensions(info); std::vector ret(dims); - ONNXRuntimeGetDimensions(info, ret.data(), ret.size()); + OrtGetDimensions(info, ret.data(), ret.size()); return ret; } } // namespace onnxruntime -#undef ONNXRUNTIME_REDIRECT_SIMPLE_FUNCTION_CALL +#undef ORT_REDIRECT_SIMPLE_FUNCTION_CALL diff --git a/onnxruntime/contrib_ops/cpu/attnlstm/bahdanau_attention.cc b/onnxruntime/contrib_ops/cpu/attnlstm/bahdanau_attention.cc index c0ec00c6b88a7..932ac263f8e22 100644 --- a/onnxruntime/contrib_ops/cpu/attnlstm/bahdanau_attention.cc +++ b/onnxruntime/contrib_ops/cpu/attnlstm/bahdanau_attention.cc @@ -22,7 +22,7 @@ 
BahdanauAttention::BahdanauAttention(AllocatorPtr allocator, const logging::L processed_query_ = Allocate(allocator_, batch_size_ * attn_depth_, processed_query_ptr_, true); mem_seq_lengths_ = Allocate(allocator_, batch_size_, mem_seq_lengths_ptr_, true); - ONNXRUNTIME_ENFORCE(!normalize_, "not support normalize yet."); + ORT_ENFORCE(!normalize_, "not support normalize yet."); } template @@ -68,7 +68,7 @@ void BahdanauAttention::PrepareMemory( for (int b = 0; b < batch_size_; b++) { int mem_steps = mem_seq_lengths_[b]; - ONNXRUNTIME_ENFORCE(mem_steps <= max_memory_steps_ && mem_steps > 0, + ORT_ENFORCE(mem_steps <= max_memory_steps_ && mem_steps > 0, "Real memory steps ", mem_steps, " is not in (0, ", max_memory_steps_, "]"); } diff --git a/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.cc b/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.cc index fea43a0db2c35..4de2be7db8b16 100644 --- a/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.cc +++ b/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.cc @@ -43,9 +43,9 @@ DeepCpuAttnLstmOp::Compute(OpKernelContext* context) const { else if (data_type == DataTypeImpl::GetType()) { /* Need to update all the helpers to support double... status = ComputeImpl(*context); */ - ONNXRUNTIME_NOT_IMPLEMENTED("LSTM operator does not support double yet"); + ORT_NOT_IMPLEMENTED("LSTM operator does not support double yet"); } else - ONNXRUNTIME_THROW("Invalid data type for LSTM operator of ", data_type); + ORT_THROW("Invalid data type for LSTM operator of ", data_type); return status; } @@ -103,7 +103,7 @@ Status DeepCpuAttnLstmOp::ComputeImpl(OpKernelContext& context) const { Status status = ValidateInputs( X, W, R, B, sequence_lens, initial_h, initial_c, P, batch_size, am_query_layer_weights, am_memory_layer_weights, am_v_weights, attn_memory, attn_memory_seq_lens, attn_layer_weights); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); const int max_memory_step = gsl::narrow(attn_memory.Shape()[1]); const int memory_depth = gsl::narrow(am_memory_layer_weights.Shape()[1]); @@ -128,7 +128,7 @@ Status DeepCpuAttnLstmOp::ComputeImpl(OpKernelContext& context) const { AllocatorPtr alloc; status = context.GetTempSpaceAllocator(&alloc); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); gsl::span input_weights = W.DataAsSpan(); gsl::span recurrent_weights = R.DataAsSpan(); @@ -333,13 +333,13 @@ static Status ValidateRnnInputsWithExtraInputFromState( int64_t input_size = X_shape[2] + extra_input_size; if (X_shape.NumDimensions() != 3) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input X must have 3 dimensions only. Actual:", X_shape); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input X must have 3 dimensions only. Actual:", X_shape); if (W_shape.NumDimensions() != 3 || W_shape[0] != num_directions || W_shape[1] != hidden_size * WRB_dim_1_multipler || W_shape[2] != input_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input W must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input W must have shape {", num_directions, ",", WRB_dim_1_multipler, "*", hidden_size, ",", input_size, "}. 
Actual:", W_shape); @@ -347,7 +347,7 @@ static Status ValidateRnnInputsWithExtraInputFromState( R_shape[0] != num_directions || R_shape[1] != hidden_size * WRB_dim_1_multipler || R_shape[2] != hidden_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input R must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input R must have shape {", num_directions, ",", WRB_dim_1_multipler, "*", hidden_size, ",", hidden_size, "}. Actual:", R_shape); @@ -356,7 +356,7 @@ static Status ValidateRnnInputsWithExtraInputFromState( if (B_shape.NumDimensions() != 2 || B_shape[0] != num_directions || B_shape[1] != 2 * WRB_dim_1_multipler * hidden_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input B must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input B must have shape {", num_directions, ",", 2 * WRB_dim_1_multipler, "*", hidden_size, "}. Actual:", B_shape); } @@ -364,7 +364,7 @@ static Status ValidateRnnInputsWithExtraInputFromState( auto& sequence_lens_shape = sequence_lens->Shape(); if (sequence_lens_shape.NumDimensions() != 1 || sequence_lens_shape[0] != batch_size) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input sequence_lens must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input sequence_lens must have shape {", batch_size, "}. Actual:", sequence_lens_shape); } @@ -372,7 +372,7 @@ static Status ValidateRnnInputsWithExtraInputFromState( if (std::any_of(sequence_len_entries.cbegin(), sequence_len_entries.cend(), [seq_length](int len) { return len <= 0 || len > seq_length; })) { - return ONNXRUNTIME_MAKE_STATUS( + return ORT_MAKE_STATUS( ONNXRUNTIME, INVALID_ARGUMENT, "Invalid value/s in sequence_lens. All values must be > 0 and < seq_length. seq_length=", seq_length); } @@ -386,7 +386,7 @@ static Status ValidateRnnInputsWithExtraInputFromState( initial_h_shape[1] != batch_size || initial_h_shape[2] != hidden_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input initial_h must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input initial_h must have shape {", num_directions, ",", batch_size, ",", hidden_size, "}. Actual:", initial_h_shape); } @@ -402,7 +402,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( // Check memory of [batch_size, max_memory_step, memory_depth_], its sequence length of [batch_size] auto memory_shape = attn_memory.Shape(); if (memory_shape.NumDimensions() != 3 || memory_shape[0] != batch_size) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Attention mechanism memory shape error! 
Expected: {", batch_size, "}, actural: ", memory_shape); } @@ -411,7 +411,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( if (attn_memory_seq_lens != nullptr) { auto memory_seq_lens_shape = attn_memory_seq_lens->Shape(); if (memory_seq_lens_shape.NumDimensions() != 1 || memory_seq_lens_shape[0] != batch_size) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Attention mechanism memory sequence lengths must have shape {", batch_size, "}, actural: ", memory_seq_lens_shape); } @@ -420,7 +420,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( mem_seq_lens_span.cbegin(), mem_seq_lens_span.cend(), [max_memory_step](int len) { return len <= 0 || len > max_memory_step; }); if (item_not_in_range != mem_seq_lens_span.cend()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Attention mechanism memory sequence lengths value must in (0, ", max_memory_step, "], while ", *item_not_in_range, " found!"); } @@ -431,7 +431,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( if (memory_layer_shape.NumDimensions() != 3 || memory_layer_shape[0] != num_directions_ || memory_layer_shape[1] != memory_depth) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Attention memory layer weight shape error! Expected:{", num_directions_, ",", memory_depth, ", am_attn_size}, Got:", memory_layer_shape); } @@ -443,7 +443,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( query_layer_shape[0] != num_directions_ || query_layer_shape[1] != hidden_size_ || query_layer_shape[2] != am_attn_size) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Attention query layer weight shape error! Expected:{", num_directions_, ", ", hidden_size_, ", ", am_attn_size, "}, Got: ", query_layer_shape); } @@ -453,7 +453,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( if (v_shape.NumDimensions() != 2 || v_shape[0] != num_directions_ || v_shape[1] != am_attn_size) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Attention v weight shape error! Expected:{", num_directions_, ", ", am_attn_size, "}. Got: ", v_shape); } @@ -466,7 +466,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( if (attn_layer_shape.NumDimensions() != 3 || attn_layer_shape[0] != num_directions_ || attn_layer_shape[1] != memory_depth + hidden_size_) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Attention layer weight shape error! Expected: {", num_directions_, ", ", memory_depth + hidden_size_, ", aw_attn_size}. Got:", attn_layer_shape); } @@ -475,7 +475,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( auto status = ValidateRnnInputsWithExtraInputFromState( X, W, R, B, 4, sequence_lens, initial_h, num_directions_, hidden_size_, aw_attn_size); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); if (initial_c != nullptr) { auto& initial_c_shape = initial_c->Shape(); @@ -485,7 +485,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( initial_c_shape[1] != batch_size || initial_c_shape[2] != hidden_size_) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input initial_c must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input initial_c must have shape {", num_directions_, ",", batch_size, ",", hidden_size_, "}. 
Actual:", initial_c_shape); } @@ -496,7 +496,7 @@ Status DeepCpuAttnLstmOp::ValidateInputs( p_shape[0] != num_directions_ || p_shape[1] != 3 * hidden_size_) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input P must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input P must have shape {", num_directions_, ",", 3 * hidden_size_, "}. Actual:", p_shape); } diff --git a/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.h b/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.h index fdbb65a5de98c..4ed92c9089bce 100644 --- a/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.h +++ b/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.h @@ -27,17 +27,17 @@ class DeepCpuAttnLstmOp final : public OpKernel { DeepCpuAttnLstmOp(const OpKernelInfo& info) : OpKernel(info), clip_(info.GetAttrOrDefault("clip", std::numeric_limits::max())) { std::string direction; - ONNXRUNTIME_ENFORCE(info.GetAttr("direction", &direction).IsOK()); + ORT_ENFORCE(info.GetAttr("direction", &direction).IsOK()); int64_t int64_value; - ONNXRUNTIME_ENFORCE(info.GetAttr("hidden_size", &int64_value).IsOK() && int64_value > 0); + ORT_ENFORCE(info.GetAttr("hidden_size", &int64_value).IsOK() && int64_value > 0); hidden_size_ = gsl::narrow(int64_value); // optional attributes std::vector activation_func_names = info.GetAttrsOrDefault("activations"); std::vector activation_func_alphas = info.GetAttrsOrDefault("activation_alpha"); std::vector activation_func_betas = info.GetAttrsOrDefault("activation_beta"); - ONNXRUNTIME_ENFORCE(clip_ > 0.f); + ORT_ENFORCE(clip_ > 0.f); if (info.GetAttr("input_forget", &int64_value).IsOK()) input_forget_ = int64_value != 0; @@ -53,7 +53,7 @@ class DeepCpuAttnLstmOp final : public OpKernel { } } - ONNXRUNTIME_ENFORCE(static_cast(activation_func_names.size()) == num_directions_ * 3); + ORT_ENFORCE(static_cast(activation_func_names.size()) == num_directions_ * 3); activation_funcs_ = ActivationFuncs(activation_func_names, activation_func_alphas, diff --git a/onnxruntime/contrib_ops/cpu/expand_dims.h b/onnxruntime/contrib_ops/cpu/expand_dims.h index f7b320499315d..72930508d68eb 100644 --- a/onnxruntime/contrib_ops/cpu/expand_dims.h +++ b/onnxruntime/contrib_ops/cpu/expand_dims.h @@ -24,7 +24,7 @@ class ExpandDims final : public OpKernel { const Tensor* axis_tensor = context->Input(1); if (axis_tensor == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch"); - ONNXRUNTIME_ENFORCE(axis_tensor->Shape().IsScalar(), "An axis tensor must be a scalar tensor."); + ORT_ENFORCE(axis_tensor->Shape().IsScalar(), "An axis tensor must be a scalar tensor."); const int64_t axis = static_cast(axis_tensor->template Data()[0]); const Tensor* X = context->Input(0); if (X == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch"); @@ -32,7 +32,7 @@ class ExpandDims final : public OpKernel { std::vector expanded_shape(X_shape.GetDims()); int64_t X_NumDims = X_shape.Size(); - ONNXRUNTIME_ENFORCE(axis <= X_NumDims && axis >= -X_NumDims, + ORT_ENFORCE(axis <= X_NumDims && axis >= -X_NumDims, "Axis must be within range [", -X_NumDims, ", ", X_NumDims, "].", " Axis is ", axis); if (axis >= 0) { expanded_shape.insert(expanded_shape.begin() + axis, 1); diff --git a/onnxruntime/contrib_ops/cpu/murmur_hash3.cc b/onnxruntime/contrib_ops/cpu/murmur_hash3.cc index 10ed9d407b4aa..bd0007fb4ed61 100644 --- a/onnxruntime/contrib_ops/cpu/murmur_hash3.cc +++ b/onnxruntime/contrib_ops/cpu/murmur_hash3.cc @@ -90,7 +90,6 @@ FORCE_INLINE uint64_t 
fmix(uint64_t k) { return k; } - namespace onnxruntime { namespace contrib { @@ -108,7 +107,6 @@ ONNX_OPERATOR_KERNEL_EX( MurmurHash3); void MurmurHash3::MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out) const { - const uint8_t* data = reinterpret_cast(key); const int nblocks = len / 4; uint32_t h1 = seed; @@ -161,7 +159,7 @@ void MurmurHash3::MurmurHash3_x86_32(const void* key, int len, uint32_t seed, vo Status MurmurHash3::Compute(OpKernelContext* ctx) const { const Tensor* keys = ctx->Input(0); - ONNXRUNTIME_ENFORCE(keys); + ORT_ENFORCE(keys); const TensorShape& input_shape = keys->Shape(); Tensor* output_tensor = ctx->Output(0, input_shape); @@ -190,7 +188,7 @@ Status MurmurHash3::Compute(OpKernelContext* ctx) const { seed_, reinterpret_cast(output) + static_cast(i) * output_element_bytes); } else { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Type not supported."); + return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Type not supported."); } } } diff --git a/onnxruntime/contrib_ops/cpu/non_max_suppression.cc b/onnxruntime/contrib_ops/cpu/non_max_suppression.cc index b406f79f4ce1c..b4696b2a7e4cc 100644 --- a/onnxruntime/contrib_ops/cpu/non_max_suppression.cc +++ b/onnxruntime/contrib_ops/cpu/non_max_suppression.cc @@ -64,19 +64,19 @@ bool NonMaxSuppression::SuppressByIOU(const T* boxes_data, int32_t box_index1 template Status NonMaxSuppression::Compute(OpKernelContext* ctx) const { const Tensor* boxes = ctx->Input(0); - ONNXRUNTIME_ENFORCE(boxes); + ORT_ENFORCE(boxes); const Tensor* scores = ctx->Input(1); - ONNXRUNTIME_ENFORCE(scores); + ORT_ENFORCE(scores); const TensorShape& boxes_shape = boxes->Shape(); auto boxes_dims = boxes_shape.GetDims(); - ONNXRUNTIME_RETURN_IF_NOT(boxes_shape.NumDimensions() == 2, "boxes must be a 2D tensor."); + ORT_RETURN_IF_NOT(boxes_shape.NumDimensions() == 2, "boxes must be a 2D tensor."); int64_t num_boxes = boxes_dims[0]; - ONNXRUNTIME_RETURN_IF_NOT(boxes_dims[1] == 4, "boxes shape must be a 2D tensor with shape [num_boxes, 4]."); + ORT_RETURN_IF_NOT(boxes_dims[1] == 4, "boxes shape must be a 2D tensor with shape [num_boxes, 4]."); const TensorShape& scores_shape = scores->Shape(); - ONNXRUNTIME_RETURN_IF_NOT(scores_shape.NumDimensions() == 1, "boxes must be a 1D tensor."); - ONNXRUNTIME_RETURN_IF_NOT(scores_shape.GetDims()[0] == num_boxes, "scores and boxes should have same num_boxes."); + ORT_RETURN_IF_NOT(scores_shape.NumDimensions() == 1, "boxes must be a 1D tensor."); + ORT_RETURN_IF_NOT(scores_shape.GetDims()[0] == num_boxes, "scores and boxes should have same num_boxes."); if (max_output_size_ <= 0 || boxes_dims[0] == 0) { TensorShape output_shape({0}); diff --git a/onnxruntime/contrib_ops/cpu/non_max_suppression.h b/onnxruntime/contrib_ops/cpu/non_max_suppression.h index 9626a5deb5755..b909ab2c78f06 100644 --- a/onnxruntime/contrib_ops/cpu/non_max_suppression.h +++ b/onnxruntime/contrib_ops/cpu/non_max_suppression.h @@ -15,10 +15,10 @@ class NonMaxSuppression final : public OpKernel { public: NonMaxSuppression(const OpKernelInfo& info) : OpKernel(info), pad_to_max_output_size_(info.GetAttrOrDefault("pad_to_max_output_size", 0)) { - ONNXRUNTIME_ENFORCE(info.GetAttr("max_output_size", &max_output_size_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("iou_threshold", &iou_threshold_).IsOK()); - ONNXRUNTIME_ENFORCE(iou_threshold_ >= 0 && iou_threshold_ <= 1, "iou_threshold must be in range [0, 1]"); - ONNXRUNTIME_ENFORCE(info.GetAttr("score_threshold", &score_threshold_).IsOK()); + 
ORT_ENFORCE(info.GetAttr("max_output_size", &max_output_size_).IsOK()); + ORT_ENFORCE(info.GetAttr("iou_threshold", &iou_threshold_).IsOK()); + ORT_ENFORCE(iou_threshold_ >= 0 && iou_threshold_ <= 1, "iou_threshold must be in range [0, 1]"); + ORT_ENFORCE(info.GetAttr("score_threshold", &score_threshold_).IsOK()); } Status Compute(OpKernelContext* context) const override; diff --git a/onnxruntime/contrib_ops/cpu/quantize_linear.cc b/onnxruntime/contrib_ops/cpu/quantize_linear.cc index 294e15f95642e..2111475b8b356 100644 --- a/onnxruntime/contrib_ops/cpu/quantize_linear.cc +++ b/onnxruntime/contrib_ops/cpu/quantize_linear.cc @@ -51,13 +51,13 @@ Status DequantizeLinear::Compute(OpKernelContext* ctx) const { if (has_axis_) { // if an axis was specified, ensure the scale and zero point are compatible - ONNXRUNTIME_ENFORCE(scale_shape.NumDimensions() == 1 && scale_shape.Size() == broadcastDim, "x_scale must be 1D tensor with size ", broadcastDim); - ONNXRUNTIME_ENFORCE(zero_point_shape.NumDimensions() == 1 && zero_point_shape.Size() == broadcastDim, "x_zero_point must be 1D tensor with size ", broadcastDim); + ORT_ENFORCE(scale_shape.NumDimensions() == 1 && scale_shape.Size() == broadcastDim, "x_scale must be 1D tensor with size ", broadcastDim); + ORT_ENFORCE(zero_point_shape.NumDimensions() == 1 && zero_point_shape.Size() == broadcastDim, "x_zero_point must be 1D tensor with size ", broadcastDim); stride = 1; } else { // if no axis, enforce that scale and zero point are scalars - ONNXRUNTIME_ENFORCE(scale_shape.NumDimensions() == 0, "x_scale must be a scalar if no axis is provided"); - ONNXRUNTIME_ENFORCE(zero_point_shape.NumDimensions() == 0, "x_zero_point must be a scalar if no axis is provided"); + ORT_ENFORCE(scale_shape.NumDimensions() == 0, "x_scale must be a scalar if no axis is provided"); + ORT_ENFORCE(zero_point_shape.NumDimensions() == 0, "x_zero_point must be a scalar if no axis is provided"); } size_t N = x_shape.SizeToDimension(axis); @@ -125,13 +125,13 @@ Status QuantizeLinear::Compute(OpKernelContext* ctx) const { if (has_axis_) { // if an axis was specified, ensure the scale and zero point are compatible - ONNXRUNTIME_ENFORCE(scale_shape.NumDimensions() == 1 && scale_shape.Size() == broadcastDim, "x_scale must be 1D tensor with size ", broadcastDim); - ONNXRUNTIME_ENFORCE(zero_point_shape.NumDimensions() == 1 && zero_point_shape.Size() == broadcastDim, "x_zero_point must be 1D tensor with size ", broadcastDim); + ORT_ENFORCE(scale_shape.NumDimensions() == 1 && scale_shape.Size() == broadcastDim, "x_scale must be 1D tensor with size ", broadcastDim); + ORT_ENFORCE(zero_point_shape.NumDimensions() == 1 && zero_point_shape.Size() == broadcastDim, "x_zero_point must be 1D tensor with size ", broadcastDim); stride = 1; } else { // if no axis, enforce that scale and zero point are scalars - ONNXRUNTIME_ENFORCE(scale_shape.NumDimensions() == 0, "x_scale must be a scalar if no axis is provided"); - ONNXRUNTIME_ENFORCE(zero_point_shape.NumDimensions() == 0, "x_zero_point must be a scalar if no axis is provided"); + ORT_ENFORCE(scale_shape.NumDimensions() == 0, "x_scale must be a scalar if no axis is provided"); + ORT_ENFORCE(zero_point_shape.NumDimensions() == 0, "x_zero_point must be a scalar if no axis is provided"); } size_t N = x_shape.SizeToDimension(axis); diff --git a/onnxruntime/contrib_ops/cpu/range.cc b/onnxruntime/contrib_ops/cpu/range.cc index a69c32e9904d1..1a7bcf277359f 100644 --- a/onnxruntime/contrib_ops/cpu/range.cc +++ b/onnxruntime/contrib_ops/cpu/range.cc @@ -13,17 
+13,17 @@ static Status ComputeRange(OpKernelContext* ctx) { auto delta_tensor_ptr = ctx->Input(2); if (!start_tensor.Shape().IsScalar()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "start in Range operator should be scalar like tensor, yet got shape:", start_tensor.Shape()); } if (!limit_tensor.Shape().IsScalar()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "limit in Range operator should be scalar like tensor, yet got shape:", limit_tensor.Shape()); } if (delta_tensor_ptr != nullptr && !delta_tensor_ptr->Shape().IsScalar()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "delta in Range operator should be scalar like tensor, yet got shape:", delta_tensor_ptr->Shape()); } @@ -33,7 +33,7 @@ static Status ComputeRange(OpKernelContext* ctx) { T delta = (delta_tensor_ptr == nullptr) ? T{1} : *(delta_tensor_ptr->template Data()); if (delta == T{0}) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "delta in Range operator can not be zero!"); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "delta in Range operator can not be zero!"); } int64_t n = static_cast(ceil((1.0 * (limit - start)) / delta)); if (n <= 0) n = 1; @@ -66,7 +66,7 @@ Status Range::Compute(OpKernelContext* ctx) const { else if (data_type == DataTypeImpl::GetType()) { return ComputeRange(ctx); } - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Unsupportted tensor data type:", data_type); } diff --git a/onnxruntime/contrib_ops/cpu/string_normalizer.cc b/onnxruntime/contrib_ops/cpu/string_normalizer.cc index 5fa4262884396..2b76a7bbb486b 100644 --- a/onnxruntime/contrib_ops/cpu/string_normalizer.cc +++ b/onnxruntime/contrib_ops/cpu/string_normalizer.cc @@ -41,7 +41,7 @@ class Locale { : loc_(nullptr) { loc_ = _create_locale(LC_CTYPE, name.c_str()); if (loc_ == nullptr) { - ONNXRUNTIME_THROW("Failed to construct locale with name:", + ORT_THROW("Failed to construct locale with name:", name, ":", ":Please, install necessary language-pack-XX and configure locales"); } } @@ -52,7 +52,7 @@ class Locale { } } - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Locale); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Locale); void ChangeCase(StringNormalizer::CaseAction caseaction, std::wstring& wstr) const { @@ -77,11 +77,11 @@ class Locale { public: explicit Locale(const std::string& name) try : loc_(name) { } catch (const std::runtime_error& e) { - ONNXRUNTIME_THROW("Failed to construct locale with name:", + ORT_THROW("Failed to construct locale with name:", name, ":", e.what(), ":Please, install necessary language-pack-XX and configure locales"); } - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Locale); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Locale); void ChangeCase(StringNormalizer::CaseAction caseaction, std::wstring& wstr) const { @@ -162,12 +162,12 @@ StringNormalizer::StringNormalizer(const OpKernelInfo& info) : OpKernel(info), compare_caseaction_(NONE) { int64_t iscasesensitive = 0; Status status = info.GetAttr("is_case_sensitive", &iscasesensitive); - ONNXRUNTIME_ENFORCE(status.IsOK(), "attribute is_case_sensitive is not set"); + ORT_ENFORCE(status.IsOK(), "attribute is_case_sensitive is not set"); is_case_sensitive_ = iscasesensitive != 0; std::string casechangeaction; status = info.GetAttr("casechangeaction", 
&casechangeaction); - ONNXRUNTIME_ENFORCE(status.IsOK(), "attribute caseaction is not set"); + ORT_ENFORCE(status.IsOK(), "attribute caseaction is not set"); if (casechangeaction == "LOWER") { casechangeaction_ = LOWER; } else if (casechangeaction == "UPPER") { @@ -175,7 +175,7 @@ StringNormalizer::StringNormalizer(const OpKernelInfo& info) : OpKernel(info), } else if (casechangeaction == "NONE") { casechangeaction_ = NONE; } else { - ONNXRUNTIME_ENFORCE(false, "attribute casechangeaction has invalid value"); + ORT_ENFORCE(false, "attribute casechangeaction has invalid value"); } if (!is_case_sensitive_) { @@ -189,16 +189,16 @@ StringNormalizer::StringNormalizer(const OpKernelInfo& info) : OpKernel(info), std::vector swords = info.GetAttrsOrDefault("stopwords"); for (const auto& sw : swords) { - ONNXRUNTIME_ENFORCE(!sw.empty(), "Empty stopwords not allowed"); + ORT_ENFORCE(!sw.empty(), "Empty stopwords not allowed"); if (is_case_sensitive_) { auto p = stopwords_.insert(sw); - ONNXRUNTIME_ENFORCE(p.second, "Duplicate stopwords not allowed"); + ORT_ENFORCE(p.second, "Duplicate stopwords not allowed"); } else { std::wstring wstr = converter.from_bytes(sw); - ONNXRUNTIME_ENFORCE(wstr != wconv_error, "Stopword contains invalid utf8 chars"); + ORT_ENFORCE(wstr != wconv_error, "Stopword contains invalid utf8 chars"); locale.ChangeCase(compare_caseaction_, wstr); auto p = wstopwords_.insert(wstr); - ONNXRUNTIME_ENFORCE(p.second, "Duplicate stopwords not allowed"); + ORT_ENFORCE(p.second, "Duplicate stopwords not allowed"); } } } diff --git a/onnxruntime/contrib_ops/cpu/tokenizer.cc b/onnxruntime/contrib_ops/cpu/tokenizer.cc index 0efaf5d5ebe4c..90aaeb9ab8e7e 100644 --- a/onnxruntime/contrib_ops/cpu/tokenizer.cc +++ b/onnxruntime/contrib_ops/cpu/tokenizer.cc @@ -199,25 +199,25 @@ struct Tokenizer::SearchData { Tokenizer::Tokenizer(const OpKernelInfo& info) : OpKernel(info) { int64_t mark = 0; auto status = info.GetAttr("mark", &mark); - ONNXRUNTIME_ENFORCE(status.IsOK(), "attribute mark is not set"); + ORT_ENFORCE(status.IsOK(), "attribute mark is not set"); mark_ = mark != 0; status = info.GetAttr("pad_value", &pad_value_); - ONNXRUNTIME_ENFORCE(status.IsOK(), "attribute pad_value is not set"); + ORT_ENFORCE(status.IsOK(), "attribute pad_value is not set"); status = info.GetAttr("mincharnum", &mincharnum_); - ONNXRUNTIME_ENFORCE(status.IsOK(), "attribute mincharnum is not set"); - ONNXRUNTIME_ENFORCE(mincharnum_ > 0, "attribute mincharnum must have a positive value"); + ORT_ENFORCE(status.IsOK(), "attribute mincharnum is not set"); + ORT_ENFORCE(mincharnum_ > 0, "attribute mincharnum must have a positive value"); std::vector separators; status = info.GetAttrs("separators", separators); - ONNXRUNTIME_ENFORCE(status.IsOK(), "attribute separators is not set"); - ONNXRUNTIME_ENFORCE(!separators.empty(), "Requires at least one separator"); + ORT_ENFORCE(status.IsOK(), "attribute separators is not set"); + ORT_ENFORCE(!separators.empty(), "Requires at least one separator"); char_tokenezation_ = (separators.size() == 1 && separators[0].empty()); - ONNXRUNTIME_ENFORCE(!char_tokenezation_ || mincharnum_ < 2, + ORT_ENFORCE(!char_tokenezation_ || mincharnum_ < 2, "mincharnum is too big for char level tokenezation"); // Create TST and insert separators @@ -226,11 +226,11 @@ Tokenizer::Tokenizer(const OpKernelInfo& info) : OpKernel(info) { std::wstring_convert> converter(conv_error, wconv_error); int priority = 0; // earlier search patterns get priority for (const auto& sep : separators) { - 
ONNXRUNTIME_ENFORCE(!sep.empty(), "No empty separators allowed"); + ORT_ENFORCE(!sep.empty(), "No empty separators allowed"); std::wstring wsep = converter.from_bytes(sep); - ONNXRUNTIME_ENFORCE(wsep != wconv_error, "Separator strings contains invalid utf8 chars"); + ORT_ENFORCE(wsep != wconv_error, "Separator strings contains invalid utf8 chars"); bool result = sd->tst_.put(wsep.c_str(), wsep.length(), {wsep.length(), priority}); - ONNXRUNTIME_ENFORCE(result, "duplicate separator detected"); + ORT_ENFORCE(result, "duplicate separator detected"); ++priority; } search_data_.swap(sd); diff --git a/onnxruntime/core/codegen/tvm/tvm_compiler.cc b/onnxruntime/core/codegen/tvm/tvm_compiler.cc index 964b9fdf78838..133b64780f51e 100644 --- a/onnxruntime/core/codegen/tvm/tvm_compiler.cc +++ b/onnxruntime/core/codegen/tvm/tvm_compiler.cc @@ -11,7 +11,7 @@ TVMGraph::TensorDescriptor::TensorDescriptor(MLDataType type, onnxruntime::Provi ctx_.device_type = DLDeviceType::kDLCPU; ctx_.device_id = 0; } else { - ONNXRUNTIME_NOT_IMPLEMENTED("Non-cpu execution provider not supported on TVM now."); + ORT_NOT_IMPLEMENTED("Non-cpu execution provider not supported on TVM now."); } if (DataTypeImpl::GetTensorType() == type) { @@ -19,7 +19,7 @@ TVMGraph::TensorDescriptor::TensorDescriptor(MLDataType type, onnxruntime::Provi dtype_.bits = 64; dtype_.lanes = 1; } else { - ONNXRUNTIME_NOT_IMPLEMENTED("Non-double type not supported on TVM now."); + ORT_NOT_IMPLEMENTED("Non-double type not supported on TVM now."); } } @@ -36,7 +36,7 @@ class IdGenerator { // This is a special compiler step for the test case that sum two 1-D tensors static void Compile1DAddToTVM(const onnxruntime::Node& node, std::unordered_map& tvm_tensors, onnxruntime::ProviderType execution_provider_type, IdGenerator& generator) { - ONNXRUNTIME_ENFORCE(node.OpType() == "Add"); + ORT_ENFORCE(node.OpType() == "Add"); tvm::Array shape; shape.push_back(tvm::var("n1")); diff --git a/onnxruntime/core/codegen/tvm/tvm_kernel.h b/onnxruntime/core/codegen/tvm/tvm_kernel.h index c6d745f532fe7..59d997e02d2f1 100644 --- a/onnxruntime/core/codegen/tvm/tvm_kernel.h +++ b/onnxruntime/core/codegen/tvm/tvm_kernel.h @@ -20,7 +20,7 @@ class TVMKernel : public OpKernel { public: explicit TVMKernel(const OpKernelInfo& info) : OpKernel(info), tvm_values_(nullptr), dl_tensors_(nullptr), tvm_type_codes_(nullptr) { auto& node = info.node(); - ONNXRUNTIME_ENFORCE(node.NodeType() == Node::Type::Fused); + ORT_ENFORCE(node.NodeType() == Node::Type::Fused); auto func = node.GetFunctionBody(); const onnxruntime::Graph& func_body = func->Body(); //1. compile the onnxruntime Graph to tvm graph. This step is common for all hardware, and provided by onnxruntime framework. @@ -65,7 +65,7 @@ class TVMKernel : public OpKernel { tvm_values_[i].v_handle = &dl_tensors_[i]; i++; } - ONNXRUNTIME_ENFORCE(i == n_args_); + ORT_ENFORCE(i == n_args_); } virtual ~TVMKernel() { @@ -102,7 +102,7 @@ class TVMKernel : public OpKernel { try { evaluate_func_.CallPacked(tvm_args, &rvalue); } catch (std::exception& ex) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "TVM run failed:", ex.what()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "TVM run failed:", ex.what()); } if (rvalue.type_code() != kNull) { return Status(onnxruntime::common::ONNXRUNTIME, onnxruntime::common::FAIL, "TVM return not null"); // TODO: get error code. 
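The hunks above replace the ONNXRUNTIME_-prefixed error macros with their ORT_-prefixed equivalents across the contrib ops and the TVM code generator. As a minimal sketch of how the renamed macros read together in a kernel after this change (the kernel name MyKernel and the helper DoWork are hypothetical, not part of this patch):

Status MyKernel::Compute(OpKernelContext* ctx) const {
  const Tensor* X = ctx->Input<Tensor>(0);
  // was ONNXRUNTIME_ENFORCE; fails with the given message when the check does not hold
  ORT_ENFORCE(X != nullptr, "input count mismatch");
  if (!X->Shape().IsScalar()) {
    // was ONNXRUNTIME_MAKE_STATUS; builds a failure Status from category, code and message parts
    return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
                           "expected a scalar tensor, got shape: ", X->Shape());
  }
  // was ONNXRUNTIME_RETURN_IF_ERROR; early-returns any non-OK Status (DoWork is a hypothetical helper)
  ORT_RETURN_IF_ERROR(DoWork(ctx));
  return Status::OK();
}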
diff --git a/onnxruntime/core/codegen/tvm/tvm_utils.cc b/onnxruntime/core/codegen/tvm/tvm_utils.cc index 6d474581aad89..d1980ca1d10b4 100644 --- a/onnxruntime/core/codegen/tvm/tvm_utils.cc +++ b/onnxruntime/core/codegen/tvm/tvm_utils.cc @@ -25,7 +25,7 @@ DLDataType ToTvmDLDataType(MLDataType ml_type) { RETURN_DLDATATYPE_IF_MATCH(float, kDLFloat); RETURN_DLDATATYPE_IF_MATCH(double, kDLFloat); - ONNXRUNTIME_NOT_IMPLEMENTED("converting MLDataType ", ml_type, " to tvm DLDataType is not implemented"); + ORT_NOT_IMPLEMENTED("converting MLDataType ", ml_type, " to tvm DLDataType is not implemented"); } } // namespace tvm_codegen diff --git a/onnxruntime/core/common/profiler.cc b/onnxruntime/core/common/profiler.cc index e631a3d173c2f..5c4384c5a3fde 100644 --- a/onnxruntime/core/common/profiler.cc +++ b/onnxruntime/core/common/profiler.cc @@ -12,12 +12,12 @@ ::onnxruntime::TimePoint profiling::Profiler::StartTime() const { } void Profiler::Initialize(const logging::Logger* session_logger) { - ONNXRUNTIME_ENFORCE(session_logger != nullptr); + ORT_ENFORCE(session_logger != nullptr); session_logger_ = session_logger; } void Profiler::StartProfiling(const logging::Logger* custom_logger) { - ONNXRUNTIME_ENFORCE(custom_logger != nullptr); + ORT_ENFORCE(custom_logger != nullptr); profile_with_logger_ = true; custom_logger_ = custom_logger; profiling_start_time_ = StartTime(); @@ -103,7 +103,7 @@ std::string Profiler::EndProfiling() { // Conditionally sync the GPU if the syncGPU flag is set. // void ProfilerSyncGpu() { - ONNXRUNTIME_NOT_IMPLEMENTED("Needs to implement only for gpus"); + ORT_NOT_IMPLEMENTED("Needs to implement only for gpus"); } } // namespace profiling diff --git a/onnxruntime/core/common/profiler.h b/onnxruntime/core/common/profiler.h index 6811a0d890fd9..36c7655973f6a 100644 --- a/onnxruntime/core/common/profiler.h +++ b/onnxruntime/core/common/profiler.h @@ -57,7 +57,7 @@ class Profiler { std::string EndProfiling(); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Profiler); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Profiler); // Mutex controlling access to profiler data std::mutex mutex_; diff --git a/onnxruntime/core/common/status.cc b/onnxruntime/core/common/status.cc index a5bc2d6f5ee8d..5fa95e8ab7603 100644 --- a/onnxruntime/core/common/status.cc +++ b/onnxruntime/core/common/status.cc @@ -18,7 +18,7 @@ namespace onnxruntime { namespace common { Status::Status(StatusCategory category, int code, const std::string& msg) { // state_ will be allocated here causing the status to be treated as a failure - ONNXRUNTIME_ENFORCE(code != static_cast(MLStatus::OK)); + ORT_ENFORCE(code != static_cast(MLStatus::OK)); state_ = std::make_unique(category, code, msg); } diff --git a/onnxruntime/core/common/task_thread_pool.h b/onnxruntime/core/common/task_thread_pool.h index 217c651896bf3..71fd5199de2bb 100644 --- a/onnxruntime/core/common/task_thread_pool.h +++ b/onnxruntime/core/common/task_thread_pool.h @@ -145,7 +145,7 @@ class TaskThreadPool { } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TaskThreadPool); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TaskThreadPool); /// @brief Entry point for pool threads. 
void MainLoop(std::size_t index) { diff --git a/onnxruntime/core/framework/allocation_planner.cc b/onnxruntime/core/framework/allocation_planner.cc index 03da58a7863e9..c13d60b71fab7 100644 --- a/onnxruntime/core/framework/allocation_planner.cc +++ b/onnxruntime/core/framework/allocation_planner.cc @@ -75,7 +75,7 @@ std::ostream& operator<<(std::ostream& out, std::pairOpType() << " (" << node->Name() << ")" << std::endl; if (step.free_from_index <= step.free_to_index) { @@ -152,7 +152,7 @@ class PlannerImpl { MLValueIndex Index(const MLValueName& name) { MLValueIndex result; auto status = mlvalue_name_idx_map_.GetIdx(name, result); - ONNXRUNTIME_ENFORCE(status.IsOK(), status.ErrorMessage()); + ORT_ENFORCE(status.IsOK(), status.ErrorMessage()); return result; } @@ -178,7 +178,7 @@ class PlannerImpl { } void Reuse(MLValueIndex reused, MLValueIndex reused_for) { - ONNXRUNTIME_ENFORCE(reused != reused_for); + ORT_ENFORCE(reused != reused_for); // find original buffer underlying ml-value we want to reuse: MLValueIndex original = Buffer(reused); // record that the new buffer will reuse that original buffer @@ -199,7 +199,7 @@ class PlannerImpl { // Note: We expect a KernelDef to be available at this point. If it is not available, the // planner would have returned an error status earlier on. - ONNXRUNTIME_ENFORCE(nullptr != p_opkernel_def); + ORT_ENFORCE(nullptr != p_opkernel_def); const std::vector>& alias_map = p_opkernel_def->Alias(); auto& input_args = node.InputDefs(); @@ -260,7 +260,7 @@ class PlannerImpl { const TypeProto& type_proto = ONNX_NAMESPACE::Utils::DataTypeUtils::ToTypeProto(tensor_type); MLDataType ml_data_type = DataTypeImpl::TypeFromProto(type_proto); const TensorTypeBase* tensor_type_base = ml_data_type->AsTensorType(); - ONNXRUNTIME_ENFORCE(nullptr != tensor_type_base); + ORT_ENFORCE(nullptr != tensor_type_base); MLDataType elt_type = tensor_type_base->GetElementType(); return elt_type->Size(); } @@ -367,7 +367,7 @@ class PlannerImpl { // Identify where each output of this node should be allocated. // This is determined by the opkernel bound to the node. 
auto node = graph_viewer_.GetNode(step.node_index); - ONNXRUNTIME_ENFORCE(nullptr != node); + ORT_ENFORCE(nullptr != node); auto p_kernelDef = utils::GetKernelDef(kernel_registry_, *node); if (nullptr == p_kernelDef) { std::ostringstream errormsg; @@ -377,9 +377,9 @@ class PlannerImpl { } auto exec_provider = execution_providers_.Get(*node); - ONNXRUNTIME_ENFORCE(exec_provider); + ORT_ENFORCE(exec_provider); - auto& default_allocator_info = exec_provider->GetAllocator(0, ONNXRuntimeMemTypeDefault)->Info(); + auto& default_allocator_info = exec_provider->GetAllocator(0, OrtMemTypeDefault)->Info(); auto& mem_type_allocated_args = p_kernelDef->OutputMemoryType(); auto& outputs = pnode->OutputDefs(); auto num_outputs = outputs.size(); @@ -433,15 +433,15 @@ class PlannerImpl { auto wt_index = Index(def_name); SequentialExecutionPlan::AllocPlanPerValue& thisplan = AllocPlan(wt_index); auto* p_provider = execution_providers_.Get(node); - ONNXRUNTIME_ENFORCE(p_provider); + ORT_ENFORCE(p_provider); thisplan.alloc_kind = AllocKind::kAllocateStatically; auto p_opkernelDef = utils::GetKernelDef(kernel_registry_, node); if (MemTypeOnCpuExplicitly(p_opkernelDef->InputMemoryType(), index)) // weights are not output from any node, so it's OK to put its location on CPU provider - thisplan.location = execution_providers_.Get(onnxruntime::kCpuExecutionProvider)->GetAllocator(0, ONNXRuntimeMemTypeDefault)->Info(); + thisplan.location = execution_providers_.Get(onnxruntime::kCpuExecutionProvider)->GetAllocator(0, OrtMemTypeDefault)->Info(); else - thisplan.location = p_provider->GetAllocator(0, ONNXRuntimeMemTypeDefault)->Info(); + thisplan.location = p_provider->GetAllocator(0, OrtMemTypeDefault)->Info(); return Status::OK(); }); @@ -587,7 +587,7 @@ Status PlannerImpl::CreatePlan() { } // compute use counts for all ml-values - ONNXRUNTIME_RETURN_IF_ERROR(ComputeUseCounts()); + ORT_RETURN_IF_ERROR(ComputeUseCounts()); // determine sharing/reuse among ml-values ComputeReusePlan(); diff --git a/onnxruntime/core/framework/allocator.cc b/onnxruntime/core/framework/allocator.cc index 634d625aa97d1..ca9d5e158e183 100644 --- a/onnxruntime/core/framework/allocator.cc +++ b/onnxruntime/core/framework/allocator.cc @@ -21,44 +21,44 @@ void CPUAllocator::Free(void* p) { free(p); } -const ONNXRuntimeAllocatorInfo& CPUAllocator::Info() const { - static constexpr ONNXRuntimeAllocatorInfo cpuAllocatorInfo(CPU, ONNXRuntimeAllocatorType::ONNXRuntimeDeviceAllocator); +const OrtAllocatorInfo& CPUAllocator::Info() const { + static constexpr OrtAllocatorInfo cpuAllocatorInfo(CPU, OrtAllocatorType::OrtDeviceAllocator); return cpuAllocatorInfo; } } // namespace onnxruntime -std::ostream& operator<<(std::ostream& out, const ONNXRuntimeAllocatorInfo& info) { +std::ostream& operator<<(std::ostream& out, const OrtAllocatorInfo& info) { return (out << info.ToString()); } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateAllocatorInfo, const char* name1, ONNXRuntimeAllocatorType type, int id1, ONNXRuntimeMemType mem_type1, ONNXRuntimeAllocatorInfo** out) { - *out = new ONNXRuntimeAllocatorInfo(name1, type, id1, mem_type1); +ORT_API_STATUS_IMPL(OrtCreateAllocatorInfo, const char* name1, OrtAllocatorType type, int id1, OrtMemType mem_type1, OrtAllocatorInfo** out) { + *out = new OrtAllocatorInfo(name1, type, id1, mem_type1); return nullptr; } -ONNXRUNTIME_API(void, ReleaseONNXRuntimeAllocatorInfo, ONNXRuntimeAllocatorInfo* p) { +ORT_API(void, ReleaseOrtAllocatorInfo, OrtAllocatorInfo* p) { delete p; } -ONNXRUNTIME_API(const char*, 
ONNXRuntimeAllocatorInfoGetName, _In_ ONNXRuntimeAllocatorInfo* ptr) { +ORT_API(const char*, OrtAllocatorInfoGetName, _In_ OrtAllocatorInfo* ptr) { return ptr->name; } -ONNXRUNTIME_API(int, ONNXRuntimeAllocatorInfoGetId, _In_ ONNXRuntimeAllocatorInfo* ptr) { +ORT_API(int, OrtAllocatorInfoGetId, _In_ OrtAllocatorInfo* ptr) { return ptr->id; } -ONNXRUNTIME_API(ONNXRuntimeMemType, ONNXRuntimeAllocatorInfoGetMemType, _In_ ONNXRuntimeAllocatorInfo* ptr) { +ORT_API(OrtMemType, OrtAllocatorInfoGetMemType, _In_ OrtAllocatorInfo* ptr) { return ptr->mem_type; } -ONNXRUNTIME_API(ONNXRuntimeAllocatorType, ONNXRuntimeAllocatorInfoGetType, _In_ ONNXRuntimeAllocatorInfo* ptr) { +ORT_API(OrtAllocatorType, OrtAllocatorInfoGetType, _In_ OrtAllocatorInfo* ptr) { return ptr->type; } -ONNXRUNTIME_API(int, ONNXRuntimeCompareAllocatorInfo, _In_ const ONNXRuntimeAllocatorInfo* info1, _In_ const ONNXRuntimeAllocatorInfo* info2) { +ORT_API(int, OrtCompareAllocatorInfo, _In_ const OrtAllocatorInfo* info1, _In_ const OrtAllocatorInfo* info2) { if (*info1 == *info2) { return 0; } return -1; -} \ No newline at end of file +} diff --git a/onnxruntime/core/framework/allocatormgr.h b/onnxruntime/core/framework/allocatormgr.h index 6ca8f53cb5f2c..3985fd4b66a98 100644 --- a/onnxruntime/core/framework/allocatormgr.h +++ b/onnxruntime/core/framework/allocatormgr.h @@ -11,7 +11,7 @@ namespace onnxruntime { using DeviceAllocatorFactory = std::function(int)>; struct DeviceAllocatorRegistrationInfo { - ONNXRuntimeMemType mem_type; + OrtMemType mem_type; DeviceAllocatorFactory factory; size_t max_mem; }; @@ -21,7 +21,7 @@ AllocatorPtr CreateAllocator(DeviceAllocatorRegistrationInfo info, int device_id class DeviceAllocatorRegistry { public: void RegisterDeviceAllocator(std::string&& name, DeviceAllocatorFactory factory, size_t max_mem, - ONNXRuntimeMemType mem_type = ONNXRuntimeMemTypeDefault) { + OrtMemType mem_type = OrtMemTypeDefault) { DeviceAllocatorRegistrationInfo info({mem_type, factory, max_mem}); device_allocator_registrations_.emplace(std::move(name), std::move(info)); } @@ -34,7 +34,7 @@ class DeviceAllocatorRegistry { private: DeviceAllocatorRegistry() = default; - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(DeviceAllocatorRegistry); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(DeviceAllocatorRegistry); std::map device_allocator_registrations_; }; diff --git a/onnxruntime/core/framework/arena.h b/onnxruntime/core/framework/arena.h index c348f41c400ed..a4df32fe95bb0 100644 --- a/onnxruntime/core/framework/arena.h +++ b/onnxruntime/core/framework/arena.h @@ -26,7 +26,7 @@ class IArenaAllocator : public IAllocator { void Free(void* p) override = 0; virtual size_t Used() const = 0; virtual size_t Max() const = 0; - const ONNXRuntimeAllocatorInfo& Info() const override = 0; + const OrtAllocatorInfo& Info() const override = 0; // allocate host pinned memory? 
}; @@ -37,7 +37,7 @@ class DummyArena : public IArenaAllocator { public: explicit DummyArena(std::unique_ptr resource_allocator) : allocator_(std::move(resource_allocator)), - info_(allocator_->Info().name, ONNXRuntimeAllocatorType::ONNXRuntimeArenaAllocator, allocator_->Info().id) { + info_(allocator_->Info().name, OrtAllocatorType::OrtArenaAllocator, allocator_->Info().id) { } ~DummyArena() override = default; @@ -57,21 +57,21 @@ class DummyArena : public IArenaAllocator { } size_t Used() const override { - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); } size_t Max() const override { - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); } - const ONNXRuntimeAllocatorInfo& Info() const override { + const OrtAllocatorInfo& Info() const override { return info_; } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(DummyArena); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(DummyArena); std::unique_ptr allocator_; - ONNXRuntimeAllocatorInfo info_; + OrtAllocatorInfo info_; }; } // namespace onnxruntime diff --git a/onnxruntime/core/framework/bfc_arena.cc b/onnxruntime/core/framework/bfc_arena.cc index 51da47f645085..5c3449571ba9a 100644 --- a/onnxruntime/core/framework/bfc_arena.cc +++ b/onnxruntime/core/framework/bfc_arena.cc @@ -9,7 +9,7 @@ BFCArena::BFCArena(std::unique_ptr resource_allocator, : device_allocator_(std::move(resource_allocator)), free_chunks_list_(kInvalidChunkHandle), next_allocation_id_(1), - info_(device_allocator_->Info().name, ONNXRuntimeAllocatorType::ONNXRuntimeArenaAllocator, device_allocator_->Info().id, device_allocator_->Info().mem_type) { + info_(device_allocator_->Info().name, OrtAllocatorType::OrtArenaAllocator, device_allocator_->Info().id, device_allocator_->Info().mem_type) { curr_region_allocation_bytes_ = RoundedBytes(std::min(total_memory, size_t{1048576})); // Allocate the requested amount of memory. 
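Alongside the macro rename, the C API surface for allocator metadata (allocator.cc above) moves from the ONNXRuntime prefix to the Ort prefix. A minimal caller-side sketch of the renamed entry points; mirroring the implementation shown above, a nullptr return means success, and the "Cpu" name string is only a placeholder:

OrtAllocatorInfo* info = nullptr;
// OrtCreateAllocatorInfo returns a status pointer; nullptr signals success
if (OrtCreateAllocatorInfo("Cpu", OrtAllocatorType::OrtDeviceAllocator, /*id*/ 0,
                           OrtMemTypeDefault, &info) == nullptr) {
  const char* name = OrtAllocatorInfoGetName(info);        // was ONNXRuntimeAllocatorInfoGetName
  int id = OrtAllocatorInfoGetId(info);                    // was ONNXRuntimeAllocatorInfoGetId
  OrtMemType mem_type = OrtAllocatorInfoGetMemType(info);  // was ONNXRuntimeAllocatorInfoGetMemType
  ReleaseOrtAllocatorInfo(info);                           // was ReleaseONNXRuntimeAllocatorInfo
}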
@@ -25,11 +25,11 @@ BFCArena::BFCArena(std::unique_ptr resource_allocator, LOGS_DEFAULT(INFO) << "Creating bin of max chunk size " << bin_size; new (BinFromIndex(b)) Bin(this, bin_size); - ONNXRUNTIME_ENFORCE(BinForSize(bin_size) == BinFromIndex(b)); - ONNXRUNTIME_ENFORCE(BinForSize(bin_size + 255) == BinFromIndex(b)); - ONNXRUNTIME_ENFORCE(BinForSize(bin_size * 2 - 1) == BinFromIndex(b)); + ORT_ENFORCE(BinForSize(bin_size) == BinFromIndex(b)); + ORT_ENFORCE(BinForSize(bin_size + 255) == BinFromIndex(b)); + ORT_ENFORCE(BinForSize(bin_size * 2 - 1) == BinFromIndex(b)); if (b + 1 < kNumBins) { - ONNXRUNTIME_ENFORCE(BinForSize(bin_size * 2) != BinFromIndex(b)); + ORT_ENFORCE(BinForSize(bin_size * 2) != BinFromIndex(b)); } } } @@ -49,7 +49,7 @@ BFCArena::~BFCArena() { } BFCArena::Chunk* BFCArena::ChunkFromHandle(ChunkHandle h) { - ONNXRUNTIME_ENFORCE(h < chunks_.size()); + ORT_ENFORCE(h < chunks_.size()); return &(chunks_[h]); } @@ -155,7 +155,7 @@ size_t BFCArena::RoundedBytes(size_t bytes) { size_t rounded_bytes = (kMinAllocationSize * ((bytes + kMinAllocationSize - 1) / kMinAllocationSize)); - ONNXRUNTIME_ENFORCE(size_t{0} == rounded_bytes % kMinAllocationSize); + ORT_ENFORCE(size_t{0} == rounded_bytes % kMinAllocationSize); return rounded_bytes; } @@ -169,7 +169,7 @@ void* BFCArena::Reserve(size_t size) { std::lock_guard lock(lock_); void* ptr = device_allocator_->Alloc(size); - ONNXRUNTIME_ENFORCE(reserved_chunks_.find(ptr) == reserved_chunks_.end()); + ORT_ENFORCE(reserved_chunks_.find(ptr) == reserved_chunks_.end()); reserved_chunks_.insert(std::pair(ptr, size)); stats_.bytes_in_use += size; stats_.num_allocs += 1; @@ -182,7 +182,7 @@ void* BFCArena::Reserve(size_t size) { size_t BFCArena::RequestedSize(const void* ptr) { std::lock_guard lock(lock_); BFCArena::ChunkHandle h = region_manager_.get_handle(ptr); - ONNXRUNTIME_ENFORCE(h != kInvalidChunkHandle); + ORT_ENFORCE(h != kInvalidChunkHandle); BFCArena::Chunk* c = ChunkFromHandle(h); return c->requested_size; } @@ -190,7 +190,7 @@ size_t BFCArena::RequestedSize(const void* ptr) { size_t BFCArena::AllocatedSize(const void* ptr) { std::lock_guard lock(lock_); BFCArena::ChunkHandle h = region_manager_.get_handle(ptr); - ONNXRUNTIME_ENFORCE(h != kInvalidChunkHandle); + ORT_ENFORCE(h != kInvalidChunkHandle); BFCArena::Chunk* c = ChunkFromHandle(h); return c->size; } @@ -251,7 +251,7 @@ void* BFCArena::FindChunkPtr(BinNum bin_num, size_t rounded_bytes, ++citer) { const BFCArena::ChunkHandle h = (*citer); BFCArena::Chunk* chunk = ChunkFromHandle(h); - ONNXRUNTIME_ENFORCE(!chunk->in_use()); + ORT_ENFORCE(!chunk->in_use()); if (chunk->size >= rounded_bytes) { // We found an existing chunk that fits us that wasn't in use, so remove // it from the free bin structure prior to using. @@ -294,7 +294,7 @@ void BFCArena::SplitChunk(BFCArena::ChunkHandle h, size_t num_bytes) { ChunkHandle h_new_chunk = AllocateChunk(); Chunk* c = ChunkFromHandle(h); - ONNXRUNTIME_ENFORCE(!c->in_use() && (c->bin_num == kInvalidBinNum)); + ORT_ENFORCE(!c->in_use() && (c->bin_num == kInvalidBinNum)); // Create a new chunk starting num_bytes after c BFCArena::Chunk* new_chunk = ChunkFromHandle(h_new_chunk); @@ -343,7 +343,7 @@ void BFCArena::Free(void* p) { void BFCArena::DeallocateRawInternal(void* ptr) { // Find the chunk from the ptr. BFCArena::ChunkHandle h = region_manager_.get_handle(ptr); - ONNXRUNTIME_ENFORCE(h != kInvalidChunkHandle); + ORT_ENFORCE(h != kInvalidChunkHandle); // Consider coalescing it. 
FreeAndMaybeCoalesce(h); @@ -356,7 +356,7 @@ void BFCArena::Merge(BFCArena::ChunkHandle h1, Chunk* c1 = ChunkFromHandle(h1); Chunk* c2 = ChunkFromHandle(h2); // We can only merge chunks that are not in use. - ONNXRUNTIME_ENFORCE(!c1->in_use() && !c2->in_use()); + ORT_ENFORCE(!c1->in_use() && !c2->in_use()); // c1's prev doesn't change, still points to the same ptr, and is // still not in use. @@ -368,7 +368,7 @@ void BFCArena::Merge(BFCArena::ChunkHandle h1, BFCArena::ChunkHandle h3 = c2->next; c1->next = h3; - ONNXRUNTIME_ENFORCE(c2->prev == h1); + ORT_ENFORCE(c2->prev == h1); if (h3 != kInvalidChunkHandle) { BFCArena::Chunk* c3 = ChunkFromHandle(h3); c3->prev = h1; @@ -390,7 +390,7 @@ void BFCArena::DeleteChunk(ChunkHandle h) { void BFCArena::InsertFreeChunkIntoBin(BFCArena::ChunkHandle h) { Chunk* c = ChunkFromHandle(h); - ONNXRUNTIME_ENFORCE(!c->in_use() && (c->bin_num == kInvalidBinNum)); + ORT_ENFORCE(!c->in_use() && (c->bin_num == kInvalidBinNum)); BinNum bin_num = BinNumForSize(c->size); Bin* new_bin = BinFromIndex(bin_num); c->bin_num = bin_num; @@ -402,22 +402,22 @@ void BFCArena::RemoveFreeChunkIterFromBin( const BFCArena::Bin::FreeChunkSet::iterator& citer) { ChunkHandle h = *citer; Chunk* c = ChunkFromHandle(h); - ONNXRUNTIME_ENFORCE(!c->in_use() && (c->bin_num != kInvalidBinNum)); + ORT_ENFORCE(!c->in_use() && (c->bin_num != kInvalidBinNum)); free_chunks->erase(citer); c->bin_num = kInvalidBinNum; } void BFCArena::RemoveFreeChunkFromBin(BFCArena::ChunkHandle h) { Chunk* c = ChunkFromHandle(h); - ONNXRUNTIME_ENFORCE(!c->in_use() && (c->bin_num != kInvalidBinNum)); - ONNXRUNTIME_ENFORCE(BinFromIndex(c->bin_num)->free_chunks.erase(h) > 0, + ORT_ENFORCE(!c->in_use() && (c->bin_num != kInvalidBinNum)); + ORT_ENFORCE(BinFromIndex(c->bin_num)->free_chunks.erase(h) > 0, "Could not find chunk in bin"); c->bin_num = kInvalidBinNum; } void BFCArena::FreeAndMaybeCoalesce(BFCArena::ChunkHandle h) { Chunk* c = ChunkFromHandle(h); - ONNXRUNTIME_ENFORCE(c->in_use() && (c->bin_num == kInvalidBinNum)); + ORT_ENFORCE(c->in_use() && (c->bin_num == kInvalidBinNum)); // Mark the chunk as no longer in use c->allocation_id = -1; @@ -480,8 +480,8 @@ BFCArena::get_bin_debug_info() { bin_info.total_chunks_in_use++; } else { Bin* bin = BinFromIndex(bin_num); - ONNXRUNTIME_ENFORCE(bin->free_chunks.count(h) == 1); - ONNXRUNTIME_ENFORCE(c->bin_num == bin_num); + ORT_ENFORCE(bin->free_chunks.count(h) == 1); + ORT_ENFORCE(c->bin_num == bin_num); } h = c->next; } @@ -494,7 +494,7 @@ void BFCArena::DumpMemoryLog(size_t num_bytes) { for (BinNum bin_num = 0; bin_num < kNumBins; bin_num++) { Bin* b = BinFromIndex(bin_num); const BinDebugInfo& bin_info = bin_infos[bin_num]; - ONNXRUNTIME_ENFORCE(b->free_chunks.size() == + ORT_ENFORCE(b->free_chunks.size() == bin_info.total_chunks_in_bin - bin_info.total_chunks_in_use); LOGS_DEFAULT(INFO) << "Bin (" << b->bin_size diff --git a/onnxruntime/core/framework/bfc_arena.h b/onnxruntime/core/framework/bfc_arena.h index 6dec27785827a..31f4f9abc4264 100644 --- a/onnxruntime/core/framework/bfc_arena.h +++ b/onnxruntime/core/framework/bfc_arena.h @@ -103,7 +103,7 @@ class BFCArena : public IArenaAllocator { return memory_limit_; } - const ONNXRuntimeAllocatorInfo& Info() const override { + const OrtAllocatorInfo& Info() const override { return info_; } @@ -229,7 +229,7 @@ class BFCArena : public IArenaAllocator { memory_size_(memory_size), end_ptr_( static_cast(static_cast(ptr_) + memory_size_)) { - ONNXRUNTIME_ENFORCE(0 == memory_size % kMinAllocationSize); + ORT_ENFORCE(0 == 
memory_size % kMinAllocationSize); const size_t n_handles = (memory_size + kMinAllocationSize - 1) / kMinAllocationSize; handles_ = new ChunkHandle[n_handles]; @@ -269,8 +269,8 @@ class BFCArena : public IArenaAllocator { int IndexFor(const void* p) const { std::uintptr_t p_int = reinterpret_cast(p); std::uintptr_t base_int = reinterpret_cast(ptr_); - ONNXRUNTIME_ENFORCE(p_int >= base_int); - ONNXRUNTIME_ENFORCE(p_int < base_int + memory_size_); + ORT_ENFORCE(p_int >= base_int); + ORT_ENFORCE(p_int < base_int + memory_size_); return static_cast(((p_int - base_int) >> kMinAllocationBits)); } @@ -284,7 +284,7 @@ class BFCArena : public IArenaAllocator { // for the memory allocation represented by "p" ChunkHandle* handles_ = nullptr; - ONNXRUNTIME_DISALLOW_ASSIGNMENT(AllocationRegion); + ORT_DISALLOW_ASSIGNMENT(AllocationRegion); }; // RegionManager aggregates one or more "AllocationRegions" and provides @@ -316,7 +316,7 @@ class BFCArena : public IArenaAllocator { const std::vector& regions() const { return regions_; } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RegionManager); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RegionManager); static bool Comparator(const void* ptr, const AllocationRegion& other) { return ptr < other.end_ptr(); @@ -474,11 +474,11 @@ class BFCArena : public IArenaAllocator { AllocatorStats stats_; - ONNXRuntimeAllocatorInfo info_; + OrtAllocatorInfo info_; std::unordered_map reserved_chunks_; - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(BFCArena); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(BFCArena); }; #ifdef __GNUC__ #pragma GCC diagnostic pop diff --git a/onnxruntime/core/framework/data_types.cc b/onnxruntime/core/framework/data_types.cc index 2a5b8a3133810..1ebb69b086686 100644 --- a/onnxruntime/core/framework/data_types.cc +++ b/onnxruntime/core/framework/data_types.cc @@ -194,7 +194,7 @@ bool IsCompatible(const ONNX_NAMESPACE::TypeProto_Map& map_proto, result = IsCompatible(lhs.value_type().sparse_tensor_type(), rhs.value_type().sparse_tensor_type()); break; default: - ONNXRUNTIME_ENFORCE(false); + ORT_ENFORCE(false); break; } } else { @@ -227,7 +227,7 @@ bool IsCompatible(const ONNX_NAMESPACE::TypeProto_Sequence& sequence_proto, result = IsCompatible(lhs.elem_type().sparse_tensor_type(), rhs.elem_type().sparse_tensor_type()); break; default: - ONNXRUNTIME_ENFORCE(false); + ORT_ENFORCE(false); break; } } else { @@ -283,10 +283,10 @@ class DataTypeRegistry { void RegisterDataType(MLDataType mltype) { using namespace ONNX_NAMESPACE; const auto* proto = mltype->GetTypeProto(); - ONNXRUNTIME_ENFORCE(proto != nullptr, "Only ONNX MLDataType can be registered"); + ORT_ENFORCE(proto != nullptr, "Only ONNX MLDataType can be registered"); DataType type = Utils::DataTypeUtils::ToType(*proto); auto p = mapping_.insert(std::make_pair(type, mltype)); - ONNXRUNTIME_ENFORCE(p.second, "We do not expect duplicate registration of types for: ", type); + ORT_ENFORCE(p.second, "We do not expect duplicate registration of types for: ", type); } MLDataType GetMLDataType(const ONNX_NAMESPACE::TypeProto& proto) const { @@ -352,8 +352,8 @@ bool TensorTypeBase::IsCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) c return false; } - ONNXRUNTIME_ENFORCE(thisProto->value_case() == TypeProto::ValueCase::kTensorType); - ONNXRUNTIME_ENFORCE(thisProto->tensor_type().has_elem_type()); + ORT_ENFORCE(thisProto->value_case() == TypeProto::ValueCase::kTensorType); + ORT_ENFORCE(thisProto->tensor_type().has_elem_type()); return data_types_internal::IsCompatible(thisProto->tensor_type(), 
type_proto.tensor_type()); } @@ -389,9 +389,9 @@ bool NonTensorTypeBase::IsMapCompatible(const ONNX_NAMESPACE::TypeProto& type_pr if (type_proto.value_case() != TypeProto::ValueCase::kMapType) { return false; } - ONNXRUNTIME_ENFORCE(thisProto->value_case() == TypeProto::ValueCase::kMapType); - ONNXRUNTIME_ENFORCE(thisProto->map_type().has_key_type()); - ONNXRUNTIME_ENFORCE(thisProto->map_type().has_value_type()); + ORT_ENFORCE(thisProto->value_case() == TypeProto::ValueCase::kMapType); + ORT_ENFORCE(thisProto->map_type().has_key_type()); + ORT_ENFORCE(thisProto->map_type().has_value_type()); return data_types_internal::IsCompatible(thisProto->map_type(), type_proto.map_type()); } @@ -403,8 +403,8 @@ bool NonTensorTypeBase::IsSequenceCompatible(const ONNX_NAMESPACE::TypeProto& ty if (type_proto.value_case() != TypeProto::ValueCase::kSequenceType) { return false; } - ONNXRUNTIME_ENFORCE(thisProto->value_case() == TypeProto::ValueCase::kSequenceType); - ONNXRUNTIME_ENFORCE(thisProto->sequence_type().has_elem_type()); + ORT_ENFORCE(thisProto->value_case() == TypeProto::ValueCase::kSequenceType); + ORT_ENFORCE(thisProto->sequence_type().has_elem_type()); return data_types_internal::IsCompatible(thisProto->sequence_type(), type_proto.sequence_type()); } @@ -416,42 +416,42 @@ bool NonTensorTypeBase::IsOpaqueCompatible(const ONNX_NAMESPACE::TypeProto& type if (type_proto.value_case() != TypeProto::ValueCase::kOpaqueType) { return false; } - ONNXRUNTIME_ENFORCE(thisProto->value_case() == TypeProto::ValueCase::kOpaqueType); - ONNXRUNTIME_ENFORCE(thisProto->opaque_type().has_domain()); - ONNXRUNTIME_ENFORCE(thisProto->opaque_type().has_name()); + ORT_ENFORCE(thisProto->value_case() == TypeProto::ValueCase::kOpaqueType); + ORT_ENFORCE(thisProto->opaque_type().has_domain()); + ORT_ENFORCE(thisProto->opaque_type().has_name()); return data_types_internal::IsCompatible(thisProto->opaque_type(), type_proto.opaque_type()); } -ONNXRUNTIME_REGISTER_TENSOR_TYPE(int32_t); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(float); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(bool); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(std::string); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(int8_t); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(uint8_t); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(uint16_t); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(int16_t); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(int64_t); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(double); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(uint32_t); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(uint64_t); -ONNXRUNTIME_REGISTER_TENSOR_TYPE(MLFloat16); - -ONNXRUNTIME_REGISTER_MAP(MapStringToString); -ONNXRUNTIME_REGISTER_MAP(MapStringToInt64); -ONNXRUNTIME_REGISTER_MAP(MapStringToFloat); -ONNXRUNTIME_REGISTER_MAP(MapStringToDouble); -ONNXRUNTIME_REGISTER_MAP(MapInt64ToString); -ONNXRUNTIME_REGISTER_MAP(MapInt64ToInt64); -ONNXRUNTIME_REGISTER_MAP(MapInt64ToFloat); -ONNXRUNTIME_REGISTER_MAP(MapInt64ToDouble); - -ONNXRUNTIME_REGISTER_SEQ(VectorString); -ONNXRUNTIME_REGISTER_SEQ(VectorFloat); -ONNXRUNTIME_REGISTER_SEQ(VectorInt64); -ONNXRUNTIME_REGISTER_SEQ(VectorDouble); - -ONNXRUNTIME_REGISTER_SEQ(VectorMapStringToFloat); -ONNXRUNTIME_REGISTER_SEQ(VectorMapInt64ToFloat); +ORT_REGISTER_TENSOR_TYPE(int32_t); +ORT_REGISTER_TENSOR_TYPE(float); +ORT_REGISTER_TENSOR_TYPE(bool); +ORT_REGISTER_TENSOR_TYPE(std::string); +ORT_REGISTER_TENSOR_TYPE(int8_t); +ORT_REGISTER_TENSOR_TYPE(uint8_t); +ORT_REGISTER_TENSOR_TYPE(uint16_t); +ORT_REGISTER_TENSOR_TYPE(int16_t); +ORT_REGISTER_TENSOR_TYPE(int64_t); +ORT_REGISTER_TENSOR_TYPE(double); +ORT_REGISTER_TENSOR_TYPE(uint32_t); 
+ORT_REGISTER_TENSOR_TYPE(uint64_t); +ORT_REGISTER_TENSOR_TYPE(MLFloat16); + +ORT_REGISTER_MAP(MapStringToString); +ORT_REGISTER_MAP(MapStringToInt64); +ORT_REGISTER_MAP(MapStringToFloat); +ORT_REGISTER_MAP(MapStringToDouble); +ORT_REGISTER_MAP(MapInt64ToString); +ORT_REGISTER_MAP(MapInt64ToInt64); +ORT_REGISTER_MAP(MapInt64ToFloat); +ORT_REGISTER_MAP(MapInt64ToDouble); + +ORT_REGISTER_SEQ(VectorString); +ORT_REGISTER_SEQ(VectorFloat); +ORT_REGISTER_SEQ(VectorInt64); +ORT_REGISTER_SEQ(VectorDouble); + +ORT_REGISTER_SEQ(VectorMapStringToFloat); +ORT_REGISTER_SEQ(VectorMapInt64ToFloat); // Used for Tensor Proto registrations #define REGISTER_TENSOR_PROTO(TYPE, reg_fn) \ @@ -512,7 +512,7 @@ MLDataType DataTypeImpl::TypeFromProto(const ONNX_NAMESPACE::TypeProto& proto) { switch (proto.value_case()) { case TypeProto::ValueCase::kTensorType: { const auto& tensor_type = proto.tensor_type(); - ONNXRUNTIME_ENFORCE(tensor_type.has_elem_type()); + ORT_ENFORCE(tensor_type.has_elem_type()); switch (tensor_type.elem_type()) { case TensorProto_DataType_FLOAT: return DataTypeImpl::GetTensorType(); @@ -541,7 +541,7 @@ MLDataType DataTypeImpl::TypeFromProto(const ONNX_NAMESPACE::TypeProto& proto) { case TensorProto_DataType_FLOAT16: return DataTypeImpl::GetTensorType(); default: - ONNXRUNTIME_NOT_IMPLEMENTED("tensor type ", tensor_type.elem_type(), " is not supported"); + ORT_NOT_IMPLEMENTED("tensor type ", tensor_type.elem_type(), " is not supported"); } } break; // kTensorType case TypeProto::ValueCase::kMapType: { @@ -597,13 +597,13 @@ MLDataType DataTypeImpl::TypeFromProto(const ONNX_NAMESPACE::TypeProto& proto) { break; } MLDataType type = registry.GetMLDataType(proto); - ONNXRUNTIME_ENFORCE(type != nullptr, "Map with key type: ", keytype, " value type: ", value_elem_type, " is not registered"); + ORT_ENFORCE(type != nullptr, "Map with key type: ", keytype, " value type: ", value_elem_type, " is not registered"); return type; } // not if(scalar tensor) pre-reg types MLDataType type = registry.GetMLDataType(proto); if (type == nullptr) { DataType str_type = ONNX_NAMESPACE::Utils::DataTypeUtils::ToType(proto); - ONNXRUNTIME_NOT_IMPLEMENTED("type: ", *str_type, " is not registered"); + ORT_NOT_IMPLEMENTED("type: ", *str_type, " is not registered"); } return type; @@ -665,26 +665,26 @@ MLDataType DataTypeImpl::TypeFromProto(const ONNX_NAMESPACE::TypeProto& proto) { MLDataType type = registry.GetMLDataType(proto); if (type == nullptr) { DataType str_type = ONNX_NAMESPACE::Utils::DataTypeUtils::ToType(proto); - ONNXRUNTIME_NOT_IMPLEMENTED("type: ", *str_type, " is not currently registered or supported"); + ORT_NOT_IMPLEMENTED("type: ", *str_type, " is not currently registered or supported"); } return type; } //Below are the types the we need to execute the runtime //They are not compatible with TypeProto in ONNX. 
-ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(int32_t); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(float); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(bool); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(std::string); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(int8_t); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(uint8_t); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(uint16_t); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(int16_t); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(int64_t); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(double); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(uint32_t); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(uint64_t); -ONNXRUNTIME_REGISTER_NON_ONNX_TYPE(MLFloat16); +ORT_REGISTER_NON_ONNX_TYPE(int32_t); +ORT_REGISTER_NON_ONNX_TYPE(float); +ORT_REGISTER_NON_ONNX_TYPE(bool); +ORT_REGISTER_NON_ONNX_TYPE(std::string); +ORT_REGISTER_NON_ONNX_TYPE(int8_t); +ORT_REGISTER_NON_ONNX_TYPE(uint8_t); +ORT_REGISTER_NON_ONNX_TYPE(uint16_t); +ORT_REGISTER_NON_ONNX_TYPE(int16_t); +ORT_REGISTER_NON_ONNX_TYPE(int64_t); +ORT_REGISTER_NON_ONNX_TYPE(double); +ORT_REGISTER_NON_ONNX_TYPE(uint32_t); +ORT_REGISTER_NON_ONNX_TYPE(uint64_t); +ORT_REGISTER_NON_ONNX_TYPE(MLFloat16); const std::vector& DataTypeImpl::AllFixedSizeTensorTypes() { static std::vector all_fixed_size_tensor_types = diff --git a/onnxruntime/core/framework/environment.cc b/onnxruntime/core/framework/environment.cc index a8dd885aee0d6..f050ecbc123f2 100644 --- a/onnxruntime/core/framework/environment.cc +++ b/onnxruntime/core/framework/environment.cc @@ -34,7 +34,7 @@ Status Environment::Initialize() { // Experimental operator does not have history kept in ONNX. Unfortunately, RS5 takes bunch of experimental operators // in onnx as production ops. MVN is one of them. Now (9/26/2018) MVN is a production function in ONNX. The experimental // MVN op was removed. The history has to be kept locally as below. - ONNXRUNTIME_ATTRIBUTE_UNUSED ONNX_OPERATOR_SCHEMA(MeanVarianceNormalization) + ORT_ATTRIBUTE_UNUSED ONNX_OPERATOR_SCHEMA(MeanVarianceNormalization) .SetDoc(R"DOC(Perform mean variance normalization.)DOC") .Attr("across_channels", "If 1, mean and variance are computed across channels. Default is 0.", AttributeProto::INT, static_cast(0)) .Attr("normalize_variance", "If 0, normalize the mean only. Default is 1.", AttributeProto::INT, static_cast(1)) @@ -46,7 +46,7 @@ Status Environment::Initialize() { "Constrain input and output types to float tensors.") .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput); // MVN operator is deprecated since operator set 9 (replaced with MVN function). 
- ONNXRUNTIME_ATTRIBUTE_UNUSED ONNX_OPERATOR_SCHEMA(MeanVarianceNormalization) + ORT_ATTRIBUTE_UNUSED ONNX_OPERATOR_SCHEMA(MeanVarianceNormalization) .SetDoc(R"DOC(Perform mean variance normalization.)DOC") .SinceVersion(9) .Deprecate() @@ -63,7 +63,7 @@ Status Environment::Initialize() { // Register MemCpy schema; // These ops are internal-only, so register outside of onnx - ONNXRUNTIME_ATTRIBUTE_UNUSED ONNX_OPERATOR_SCHEMA(MemcpyFromHost) + ORT_ATTRIBUTE_UNUSED ONNX_OPERATOR_SCHEMA(MemcpyFromHost) .Input(0, "X", "input", "T") .Output(0, "Y", "output", "T") .TypeConstraint( @@ -75,7 +75,7 @@ Status Environment::Initialize() { Internal copy node )DOC"); - ONNXRUNTIME_ATTRIBUTE_UNUSED ONNX_OPERATOR_SCHEMA(MemcpyToHost) + ORT_ATTRIBUTE_UNUSED ONNX_OPERATOR_SCHEMA(MemcpyToHost) .Input(0, "X", "input", "T") .Output(0, "Y", "output", "T") .TypeConstraint( diff --git a/onnxruntime/core/framework/error_code.cc b/onnxruntime/core/framework/error_code.cc index f59d3169e3523..8513905034f43 100644 --- a/onnxruntime/core/framework/error_code.cc +++ b/onnxruntime/core/framework/error_code.cc @@ -7,7 +7,7 @@ #include using onnxruntime::common::Status; -ONNXRUNTIME_API(ONNXStatus*, CreateONNXStatus, ONNXRuntimeErrorCode code, const char* msg) { +ORT_API(ONNXStatus*, CreateONNXStatus, OrtErrorCode code, const char* msg) { assert(!(code == 0 && msg != nullptr)); size_t clen = strlen(msg); size_t len = clen + 1 + sizeof(int); @@ -36,10 +36,10 @@ ONNXStatus* ToONNXStatus(const Status& st) { return ret; } } // namespace onnxruntime -ONNXRUNTIME_API(ONNXRuntimeErrorCode, ONNXRuntimeGetErrorCode, _In_ const ONNXStatus* status) { - return *reinterpret_cast(const_cast(status)); +ORT_API(OrtErrorCode, OrtGetErrorCode, _In_ const ONNXStatus* status) { + return *reinterpret_cast(const_cast(status)); } -ONNXRUNTIME_API(const char*, ONNXRuntimeGetErrorMessage, _In_ const ONNXStatus* status) { +ORT_API(const char*, OrtGetErrorMessage, _In_ const ONNXStatus* status) { return reinterpret_cast(status) + sizeof(int); } diff --git a/onnxruntime/core/framework/execution_frame.cc b/onnxruntime/core/framework/execution_frame.cc index b5df11f7d03b8..c5323c4e0a0ed 100644 --- a/onnxruntime/core/framework/execution_frame.cc +++ b/onnxruntime/core/framework/execution_frame.cc @@ -20,7 +20,7 @@ ExecutionFrame::ExecutionFrame(const std::unordered_map& f const ::onnxruntime::SessionState& session_state) : session_state_(session_state), mem_patterns_(nullptr), planner_(nullptr) { auto* graph = session_state.GetGraphViewer(); - ONNXRUNTIME_ENFORCE(graph); + ORT_ENFORCE(graph); Init(*graph, feeds, output_names, fetches); // If the session enable memory pattern optimization @@ -49,7 +49,7 @@ ExecutionFrame::ExecutionFrame(const std::unordered_map& f // pre-allocate the big chunk requested in memory pattern. // all the internal kernel's input/output tensors will be allocated on these buffer. for (size_t i = 0; i < mem_patterns_->locations.size(); i++) { - ONNXRUNTIME_ENFORCE(buffers_.find(mem_patterns_->locations[i]) == buffers_.end()); + ORT_ENFORCE(buffers_.find(mem_patterns_->locations[i]) == buffers_.end()); AllocatorPtr alloc = GetAllocator(mem_patterns_->locations[i]); void* buffer = mem_patterns_->patterns[i].PeakSize() > 0 ? 
alloc->Alloc(mem_patterns_->patterns[i].PeakSize()) : nullptr; buffers_[mem_patterns_->locations[i]] = BufferUniquePtr(buffer, alloc); @@ -63,16 +63,16 @@ ExecutionFrame::~ExecutionFrame() = default; Status ExecutionFrame::AllocateMLValueTensorSelfOwnBuffer(int mlvalue_index, const DataTypeImpl* element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape, bool create_fence) { - ONNXRUNTIME_ENFORCE(mlvalue_index >= 0 && static_cast(mlvalue_index) < all_values_.size()); + ORT_ENFORCE(mlvalue_index >= 0 && static_cast(mlvalue_index) < all_values_.size()); return AllocateMLValueTensorSelfOwnBufferHelper(mlvalue_index, element_type, location, shape, create_fence); } Status ExecutionFrame::AllocateMLValueTensorSelfOwnBufferHelper(int mlvalue_index, const DataTypeImpl* element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape, bool create_fence) { if (mlvalue_index < 0) @@ -95,7 +95,7 @@ Status ExecutionFrame::AllocateMLValueTensorSelfOwnBufferHelper(int mlvalue_inde } // create fence if needed if (create_fence) { - ONNXRUNTIME_ENFORCE(p_mlvalue->Fence() == nullptr); + ORT_ENFORCE(p_mlvalue->Fence() == nullptr); FencePtr f = alloc->CreateFence(&SessionState()); // it is OK to have fence been nullptr if the execution provider has no async execution, // and allocator::CreateFence returns nullptr @@ -164,23 +164,23 @@ void ExecutionFrame::TraceAllocate(int mlvalue_idx, size_t size) { Status ExecutionFrame::AllocateTensorWithSelfOwnBuffer(const int index, const DataTypeImpl* element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape, bool create_fence) { - ONNXRUNTIME_ENFORCE(index >= 0 && static_cast(index) < node_values_.size()); + ORT_ENFORCE(index >= 0 && static_cast(index) < node_values_.size()); return AllocateMLValueTensorSelfOwnBufferHelper(node_values_[index], element_type, location, shape, create_fence); } Status ExecutionFrame::AllocateMLValueTensorPreAllocateBuffer(int mlvalue_index_to_allocate, int mlvalue_index_reuse, const DataTypeImpl* element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape, bool create_fence) { - ONNXRUNTIME_ENFORCE(mlvalue_index_to_allocate >= 0 && mlvalue_index_to_allocate < all_values_.size()); + ORT_ENFORCE(mlvalue_index_to_allocate >= 0 && mlvalue_index_to_allocate < all_values_.size()); MLValue* p_mlvalue = &all_values_[mlvalue_index_to_allocate]; - ONNXRUNTIME_ENFORCE(mlvalue_index_reuse >= 0 && mlvalue_index_reuse < all_values_.size()); + ORT_ENFORCE(mlvalue_index_reuse >= 0 && mlvalue_index_reuse < all_values_.size()); MLValue* p_mlvalue_reuse = &all_values_[mlvalue_index_reuse]; auto* reuse_tensor = p_mlvalue_reuse->GetMutable(); @@ -201,7 +201,7 @@ Status ExecutionFrame::AllocateMLValueTensorPreAllocateBuffer(int mlvalue_index_ Status ExecutionFrame::AllocateTensorWithPreAllocateBufferHelper(MLValue* p_mlvalue, void* pBuffer, const DataTypeImpl* element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape) { if (p_mlvalue->IsAllocated()) { return Status::OK(); @@ -220,9 +220,9 @@ Status ExecutionFrame::AllocateTensorWithPreAllocateBufferHelper(MLValue* p_mlva Status ExecutionFrame::AllocateTensorWithPreAllocateBuffer(const int offset, void* pBuffer, const DataTypeImpl* element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, 
const TensorShape& shape) { - ONNXRUNTIME_ENFORCE(offset >= 0 && offset < node_values_.size()); + ORT_ENFORCE(offset >= 0 && offset < node_values_.size()); if (node_values_[offset] < 0) return Status(ONNXRUNTIME, FAIL, "Trying to allocate memory for unused optional inputs/outputs"); auto value = &all_values_[node_values_[offset]]; @@ -230,7 +230,7 @@ Status ExecutionFrame::AllocateTensorWithPreAllocateBuffer(const int offset, } void ExecutionFrame::Release(const int offset) { - ONNXRUNTIME_ENFORCE(offset >= 0 && offset < node_offsets_.size()); + ORT_ENFORCE(offset >= 0 && offset < node_offsets_.size()); if (node_values_[offset] >= 0 && node_values_[offset] < all_values_.size()) { all_values_[node_values_[offset]] = MLValue(); TraceFree(node_values_[offset]); @@ -242,7 +242,7 @@ Status AllocateTraditionalMLValue(MLValue* p_mlvalue, const MLValueAllocationParameters& parameters) { // right now we don't need any parameter for ml value creation, // keep it in api for extensibility - ONNXRUNTIME_UNUSED_PARAMETER(parameters); + ORT_UNUSED_PARAMETER(parameters); auto creator = type->GetCreateFunc(); p_mlvalue->Init(creator(), type, @@ -258,7 +258,7 @@ Status ExecutionFrame::AllocateAsPerAllocationPlan(int mlvalue_index, "Tried to allocated with invalid mlvalue index: " + std::to_string(mlvalue_index)); const SequentialExecutionPlan* p_seq_exec_plan = session_state_.GetExecutionPlan(); const auto& alloc_plan = p_seq_exec_plan->allocation_plan; - ONNXRUNTIME_ENFORCE(mlvalue_index >= 0 && mlvalue_index < alloc_plan.size()); + ORT_ENFORCE(mlvalue_index >= 0 && mlvalue_index < alloc_plan.size()); const auto& per_alloc_plan = alloc_plan[mlvalue_index]; auto alloc_info = per_alloc_plan.location; @@ -281,7 +281,7 @@ Status ExecutionFrame::AllocateAsPerAllocationPlan(int mlvalue_index, // In the future we may want to have different way to handle it. 
case AllocKind::kAllocateOutput: case AllocKind::kAllocate: { - ONNXRUNTIME_RETURN_IF_ERROR(AllocateMLValueTensorSelfOwnBuffer(mlvalue_index, + ORT_RETURN_IF_ERROR(AllocateMLValueTensorSelfOwnBuffer(mlvalue_index, ml_data_type, alloc_info, parameters.tensor_shape, @@ -290,7 +290,7 @@ Status ExecutionFrame::AllocateAsPerAllocationPlan(int mlvalue_index, } case AllocKind::kReuse: { int reuse_mlvalue_index = per_alloc_plan.reused_buffer; - ONNXRUNTIME_RETURN_IF_ERROR(AllocateMLValueTensorPreAllocateBuffer(mlvalue_index, + ORT_RETURN_IF_ERROR(AllocateMLValueTensorPreAllocateBuffer(mlvalue_index, reuse_mlvalue_index, ml_data_type, alloc_info, @@ -332,7 +332,7 @@ void ExecutionFrame::Init(const onnxruntime::GraphViewer& graph, for (const auto& feed : feeds) { int mlvalue_idx; Status status = mlvalue_idx_map.GetIdx(feed.first, mlvalue_idx); - ONNXRUNTIME_ENFORCE(status.IsOK(), status.ErrorMessage()); + ORT_ENFORCE(status.IsOK(), status.ErrorMessage()); // we are sharing the underline tensor/object for MLValue all_values_[mlvalue_idx] = feed.second; } @@ -342,13 +342,13 @@ void ExecutionFrame::Init(const onnxruntime::GraphViewer& graph, for (const auto& oname : output_names) { int mlvalue_idx; Status status = mlvalue_idx_map.GetIdx(oname, mlvalue_idx); - ONNXRUNTIME_ENFORCE(status.IsOK(), status.ErrorMessage()); + ORT_ENFORCE(status.IsOK(), status.ErrorMessage()); output_indices_.push_back(mlvalue_idx); } if (!fetches.empty()) { // should've already verified this much before when Run() starts - ONNXRUNTIME_ENFORCE(output_names.size() == fetches.size(), + ORT_ENFORCE(output_names.size() == fetches.size(), "output_names vector size: " + std::to_string(output_names.size()) + " does not match that of fetches vector: " + std::to_string(fetches.size())); @@ -356,7 +356,7 @@ void ExecutionFrame::Init(const onnxruntime::GraphViewer& graph, for (const auto& oname : output_names) { int mlvalue_idx; Status status = mlvalue_idx_map.GetIdx(oname, mlvalue_idx); - ONNXRUNTIME_ENFORCE(status.IsOK(), status.ErrorMessage()); + ORT_ENFORCE(status.IsOK(), status.ErrorMessage()); all_values_[mlvalue_idx] = fetches.at(idx++); output_indices_.push_back(mlvalue_idx); } @@ -364,7 +364,7 @@ void ExecutionFrame::Init(const onnxruntime::GraphViewer& graph, // 5. 
set node args for (auto& node : graph.Nodes()) { - ONNXRUNTIME_ENFORCE(node.Index() < node_offsets_.size()); + ORT_ENFORCE(node.Index() < node_offsets_.size()); node_offsets_[node.Index()] = static_cast(node_values_.size()); for (auto input_def : node.InputDefs()) { @@ -382,7 +382,7 @@ void ExecutionFrame::Init(const onnxruntime::GraphViewer& graph, } void ExecutionFrame::SetupNodeArg(const onnxruntime::NodeArg* arg) { - ONNXRUNTIME_ENFORCE(arg); + ORT_ENFORCE(arg); auto& name = arg->Name(); //if the arg's name is empty, it is an not needed optional input/output //set index to -1 @@ -391,7 +391,7 @@ void ExecutionFrame::SetupNodeArg(const onnxruntime::NodeArg* arg) { } else { int index; Status status = session_state_.GetMLValueNameIdxMap().GetIdx(name, index); - ONNXRUNTIME_ENFORCE(status.IsOK(), status.ErrorMessage()); + ORT_ENFORCE(status.IsOK(), status.ErrorMessage()); node_values_.push_back(index); } } @@ -433,7 +433,7 @@ Status ExecutionFrame::GeneratePatterns(MemoryPatternGroup* out) const { // Return nullptr if index map to an value that is an unused optional input/output const MLValue* ExecutionFrame::GetNodeInputOrOutputMLValue(int index) const { - ONNXRUNTIME_ENFORCE(index >= 0 && static_cast(index) < node_values_.size()); + ORT_ENFORCE(index >= 0 && static_cast(index) < node_values_.size()); return node_values_[index] >= 0 ? &all_values_[node_values_[index]] : nullptr; } @@ -442,7 +442,7 @@ MLValue* ExecutionFrame::GetMutableNodeInputOrOutputMLValue(int index) { return const_cast(GetNodeInputOrOutputMLValue(index)); } -AllocatorPtr ExecutionFrame::GetAllocator(const ONNXRuntimeAllocatorInfo& info) { +AllocatorPtr ExecutionFrame::GetAllocator(const OrtAllocatorInfo& info) { return utils::GetAllocator(session_state_, info); } @@ -451,7 +451,7 @@ static inline void VerifyShape(const MLValue* p_mlvalue, if (p_mlvalue->IsTensor()) { const Tensor* tensor = &p_mlvalue->Get(); - ONNXRUNTIME_ENFORCE(tensor->Shape() == parameters.tensor_shape, + ORT_ENFORCE(tensor->Shape() == parameters.tensor_shape, "MLValue shape verification failed. Current shape:", tensor->Shape(), " Requested shape:", parameters.tensor_shape); } @@ -483,13 +483,13 @@ Status ExecutionFrame::GetOrCreateNodeOutputMLValue(int index, } // It's not allocated, then allocate it with given shape and return. 
// Perform allocation based on the allocation plan - ONNXRUNTIME_RETURN_IF_ERROR(AllocateAsPerAllocationPlan(node_values_[index], parameters)); + ORT_RETURN_IF_ERROR(AllocateAsPerAllocationPlan(node_values_[index], parameters)); return Status::OK(); } Status ExecutionFrame::ReleaseMLValue(int mlvalue_idx) { if (mlvalue_idx < 0 || static_cast(mlvalue_idx) >= all_values_.size()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "invalid index ", mlvalue_idx); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "invalid index ", mlvalue_idx); } all_values_[mlvalue_idx] = MLValue(); TraceFree(mlvalue_idx); @@ -499,7 +499,7 @@ Status ExecutionFrame::ReleaseMLValue(int mlvalue_idx) { const SequentialExecutionPlan::AllocPlanPerValue& ExecutionFrame::GetAllocationPlan(int mlvalue_idx) { const SequentialExecutionPlan* p_seq_exec_plan = session_state_.GetExecutionPlan(); const auto& alloc_plan = p_seq_exec_plan->allocation_plan; - ONNXRUNTIME_ENFORCE(mlvalue_idx >= 0 && mlvalue_idx < alloc_plan.size()); + ORT_ENFORCE(mlvalue_idx >= 0 && mlvalue_idx < alloc_plan.size()); return alloc_plan[mlvalue_idx]; } } // namespace onnxruntime diff --git a/onnxruntime/core/framework/execution_frame.h b/onnxruntime/core/framework/execution_frame.h index 60b32963fccb6..1d0a410e33ef5 100644 --- a/onnxruntime/core/framework/execution_frame.h +++ b/onnxruntime/core/framework/execution_frame.h @@ -35,26 +35,26 @@ class ExecutionFrame { Status AllocateMLValueTensorSelfOwnBuffer(int mlvalue_index, MLDataType element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape, bool create_fence = false); Status AllocateMLValueTensorPreAllocateBuffer(int mlvalue_index_to_allocate, int mlvalue_index_reuse, MLDataType element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape, bool create_fence = false); // ?? Cheng: What about non-tensor values?? - // ?? Cheng: There are cases we may not want to use ONNXRUNTIME_ENFORCE?? + // ?? Cheng: There are cases we may not want to use ORT_ENFORCE?? // ?? Cheng: Graph must be immutable for GetNodesInTopologicalOrder?? // Create tensor at index mlvalue, and allocate buffer for it. // This tensor will own this buffer. // This method is not thread safe! Status AllocateTensorWithSelfOwnBuffer(int index, MLDataType element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape, bool create_fence = false); @@ -66,22 +66,22 @@ class ExecutionFrame { Status AllocateTensorWithPreAllocateBuffer(int offset, void* pBuffer, MLDataType element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape); const MLValue& GetMLValue(int mlvalue_index) const { - ONNXRUNTIME_ENFORCE(mlvalue_index >= 0 && static_cast(mlvalue_index) < all_values_.size()); + ORT_ENFORCE(mlvalue_index >= 0 && static_cast(mlvalue_index) < all_values_.size()); return all_values_[mlvalue_index]; } MLValue& GetMutableMLValue(int mlvalue_index) { - ONNXRUNTIME_ENFORCE(mlvalue_index >= 0 && static_cast(mlvalue_index) < all_values_.size()); + ORT_ENFORCE(mlvalue_index >= 0 && static_cast(mlvalue_index) < all_values_.size()); return all_values_[mlvalue_index]; } // Index to the first argument of the given node. 
int GetFirstArgIndex(onnxruntime::NodeIndex index) const { - ONNXRUNTIME_ENFORCE(index < node_offsets_.size()); + ORT_ENFORCE(index < node_offsets_.size()); return node_offsets_[index]; } @@ -96,7 +96,7 @@ class ExecutionFrame { const MLValueAllocationParameters& parameters, MLValue*& p_mlvalue); - AllocatorPtr GetAllocator(const ONNXRuntimeAllocatorInfo& info); + AllocatorPtr GetAllocator(const OrtAllocatorInfo& info); Status ReleaseMLValue(int mlvalue_idx); @@ -111,7 +111,7 @@ class ExecutionFrame { } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ExecutionFrame); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ExecutionFrame); // This method is not thread safe! void Release(int offset); @@ -121,7 +121,7 @@ class ExecutionFrame { Status AllocateMLValueTensorSelfOwnBufferHelper(int mlvalue_index, MLDataType element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape, bool create_fence); @@ -135,7 +135,7 @@ class ExecutionFrame { Status AllocateTensorWithPreAllocateBufferHelper(MLValue* p_mlvalue, void* pBuffer, MLDataType element_type, - const ONNXRuntimeAllocatorInfo& location, + const OrtAllocatorInfo& location, const TensorShape& shape); void TraceAllocate(int mlvalue_idx, size_t size); @@ -178,6 +178,6 @@ class ExecutionFrame { std::vector output_indices_; // Big chunks on different locations that will be used by mem_pattern. - std::map buffers_; + std::map buffers_; }; } // namespace onnxruntime diff --git a/onnxruntime/core/framework/execution_provider.cc b/onnxruntime/core/framework/execution_provider.cc index 86b58aed0bf68..96bc5e5bd4ea0 100644 --- a/onnxruntime/core/framework/execution_provider.cc +++ b/onnxruntime/core/framework/execution_provider.cc @@ -11,12 +11,12 @@ namespace onnxruntime { namespace { -inline int MakeKey(int id, ONNXRuntimeMemType mem_type) { +inline int MakeKey(int id, OrtMemType mem_type) { return id << 2 | mem_type; } } // namespace -AllocatorPtr IExecutionProvider::GetAllocator(int id, ONNXRuntimeMemType mem_type) const { +AllocatorPtr IExecutionProvider::GetAllocator(int id, OrtMemType mem_type) const { auto iter = allocators_.find(MakeKey(id, mem_type)); if (iter != allocators_.end()) { return iter->second; @@ -45,7 +45,7 @@ common::Status IExecutionProvider::CopyTensor(const Tensor& src, Tensor& dst, int exec_queue_id) const { // execution provider may override this to support different exec queues - ONNXRUNTIME_ENFORCE(exec_queue_id == 0); + ORT_ENFORCE(exec_queue_id == 0); return CopyTensor(src, dst); } @@ -56,11 +56,11 @@ common::Status IExecutionProvider::OnRunStart() { return Status::OK(); } common::Status IExecutionProvider::OnRunEnd() { return Status::OK(); } void IExecutionProvider::InsertAllocator(AllocatorPtr allocator) { - const ONNXRuntimeAllocatorInfo& info = allocator->Info(); + const OrtAllocatorInfo& info = allocator->Info(); const int key = MakeKey(info.id, info.mem_type); auto iter = allocators_.find(key); if (iter != allocators_.end()) { - ONNXRUNTIME_THROW("duplicated allocator"); + ORT_THROW("duplicated allocator"); } allocators_.insert(iter, {key, allocator}); } diff --git a/onnxruntime/core/framework/execution_providers.h b/onnxruntime/core/framework/execution_providers.h index a4c05b4bf8dd8..4156d234582b1 100644 --- a/onnxruntime/core/framework/execution_providers.h +++ b/onnxruntime/core/framework/execution_providers.h @@ -25,14 +25,14 @@ class ExecutionProviders { common::Status Add(const std::string& provider_id, std::unique_ptr p_exec_provider) { // make sure 
there are no issues before we change any internal data structures if (provider_idx_map_.find(provider_id) != provider_idx_map_.end()) { - auto status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Provider ", provider_id, " has already been registered."); + auto status = ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Provider ", provider_id, " has already been registered."); LOGS_DEFAULT(ERROR) << status.ErrorMessage(); return status; } for (const auto& allocator : p_exec_provider->GetAllocatorMap()) { if (allocator_idx_map_.find(allocator->Info()) != allocator_idx_map_.end()) { - auto status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, allocator->Info(), " allocator already registered."); + auto status = ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, allocator->Info(), " allocator already registered."); LOGS_DEFAULT(ERROR) << status.ErrorMessage(); return status; } @@ -41,10 +41,10 @@ class ExecutionProviders { // index that provider will have after insertion auto new_provider_idx = exec_providers_.size(); - ONNXRUNTIME_IGNORE_RETURN_VALUE(provider_idx_map_.insert({provider_id, new_provider_idx})); + ORT_IGNORE_RETURN_VALUE(provider_idx_map_.insert({provider_id, new_provider_idx})); for (const auto& allocator : p_exec_provider->GetAllocatorMap()) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(allocator_idx_map_.insert({allocator->Info(), new_provider_idx})); + ORT_IGNORE_RETURN_VALUE(allocator_idx_map_.insert({allocator->Info(), new_provider_idx})); } exec_providers_.push_back(std::move(p_exec_provider)); @@ -65,7 +65,7 @@ class ExecutionProviders { return exec_providers_[it->second].get(); } - const IExecutionProvider* Get(const ONNXRuntimeAllocatorInfo& allocator_info) const { + const IExecutionProvider* Get(const OrtAllocatorInfo& allocator_info) const { auto it = allocator_idx_map_.find(allocator_info); if (it == allocator_idx_map_.end()) { return nullptr; @@ -85,8 +85,8 @@ class ExecutionProviders { // maps for fast lookup of an index into exec_providers_ std::unordered_map provider_idx_map_; - // using std::map as ONNXRuntimeAllocatorInfo would need a custom hash function to be used with unordered_map, + // using std::map as OrtAllocatorInfo would need a custom hash function to be used with unordered_map, // and as this isn't performance critical it's not worth the maintenance overhead of adding one. - std::map allocator_idx_map_; + std::map allocator_idx_map_; }; } // namespace onnxruntime diff --git a/onnxruntime/core/framework/graph_partitioner.cc b/onnxruntime/core/framework/graph_partitioner.cc index 648f36c723d03..203cd365275c7 100644 --- a/onnxruntime/core/framework/graph_partitioner.cc +++ b/onnxruntime/core/framework/graph_partitioner.cc @@ -73,8 +73,8 @@ Status GraphPartitioner::Partition(onnxruntime::Graph& graph) const { if (nullptr == capability->sub_graph->GetMetaDef()) { // The can run a single node in the if not using meta-defs. // A fused kernel is not supported in this case. - ONNXRUNTIME_ENFORCE(1 == capability->sub_graph->nodes.size()); - ONNXRUNTIME_ENFORCE(capability->fuse_kernel_function == nullptr); + ORT_ENFORCE(1 == capability->sub_graph->nodes.size()); + ORT_ENFORCE(capability->fuse_kernel_function == nullptr); auto node = graph.GetNode(capability->sub_graph->nodes[0]); if (nullptr != node && node->GetExecutionProviderType().empty()) { @@ -84,7 +84,7 @@ Status GraphPartitioner::Partition(onnxruntime::Graph& graph) const { // The can run a fused in the . 
// // Add fused node into - ONNXRUNTIME_ENFORCE(nullptr != capability->sub_graph->GetMetaDef()); + ORT_ENFORCE(nullptr != capability->sub_graph->GetMetaDef()); std::string node_name = provider->Type() + "_" + capability->sub_graph->GetMetaDef()->name + "_" + std::to_string(count++); auto& fused_node = graph.FuseSubGraph(std::move(capability->sub_graph), node_name); fused_node.SetExecutionProviderType(provider->Type()); @@ -99,7 +99,7 @@ Status GraphPartitioner::Partition(onnxruntime::Graph& graph) const { } // all done with this provider, resolve the graph before we move on to the next provider. // This is needed since we create a new GraphViewer() that we pass into the next provider's GetCapability(). - ONNXRUNTIME_ENFORCE(graph.Resolve().IsOK()); + ORT_ENFORCE(graph.Resolve().IsOK()); } // To see if the node with no provider can be inlined. If one such nodes can be @@ -122,7 +122,7 @@ Status GraphPartitioner::Partition(onnxruntime::Graph& graph) const { } // Resolve and rerun graph partition if (inline_flag) { - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); this->Partition(graph); } diff --git a/onnxruntime/core/framework/graph_partitioner.h b/onnxruntime/core/framework/graph_partitioner.h index b03ad8c844bf5..ca3c56bf75e7f 100644 --- a/onnxruntime/core/framework/graph_partitioner.h +++ b/onnxruntime/core/framework/graph_partitioner.h @@ -23,7 +23,7 @@ class GraphPartitioner { Status Partition(onnxruntime::Graph& graph) const; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphPartitioner); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphPartitioner); KernelRegistryManager& kernel_registry_mgr_; const ExecutionProviders& providers_; diff --git a/onnxruntime/core/framework/insert_cast_transformer.cc b/onnxruntime/core/framework/insert_cast_transformer.cc index eb875f421880e..d7ff4d164b08f 100644 --- a/onnxruntime/core/framework/insert_cast_transformer.cc +++ b/onnxruntime/core/framework/insert_cast_transformer.cc @@ -90,9 +90,9 @@ Status ForceSingleNodeCPUFloat16ToFloat32(onnxruntime::Graph& graph) { } Status InsertCastTransformer::Apply(onnxruntime::Graph& graph, bool& modified) const { - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); if (force_cpu_fp32_) - ONNXRUNTIME_RETURN_IF_ERROR(ForceSingleNodeCPUFloat16ToFloat32(graph)); + ORT_RETURN_IF_ERROR(ForceSingleNodeCPUFloat16ToFloat32(graph)); GraphViewer graph_viewer(graph); auto& order = graph_viewer.GetNodesInTopologicalOrder(); @@ -163,7 +163,7 @@ Status InsertCastTransformer::Apply(onnxruntime::Graph& graph, bool& modified) c } //Resolve it to build the edges. 
- ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); std::map replacement_defs; std::vector removed_nodes; for (auto& node : graph.Nodes()) { diff --git a/onnxruntime/core/framework/kernel_registry.cc b/onnxruntime/core/framework/kernel_registry.cc index 81c6fae98702b..a6afcda22e5a1 100644 --- a/onnxruntime/core/framework/kernel_registry.cc +++ b/onnxruntime/core/framework/kernel_registry.cc @@ -13,7 +13,7 @@ const ::ONNX_NAMESPACE::TypeProto* FindTypeBinding(const onnxruntime::Node& node const ONNX_NAMESPACE::OpSchema& op_schema = *node.Op(); // search inputs: const size_t len = node.InputArgCount().size(); - ONNXRUNTIME_ENFORCE(len <= op_schema.inputs().size()); + ORT_ENFORCE(len <= op_schema.inputs().size()); int actual_index = 0; for (size_t formal_index = 0; formal_index != len; ++formal_index) { auto& param = op_schema.inputs()[formal_index]; diff --git a/onnxruntime/core/framework/kernel_registry_manager.h b/onnxruntime/core/framework/kernel_registry_manager.h index e41fb3cb03da7..d8523fc1049f8 100644 --- a/onnxruntime/core/framework/kernel_registry_manager.h +++ b/onnxruntime/core/framework/kernel_registry_manager.h @@ -54,7 +54,7 @@ class KernelRegistryManager { } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(KernelRegistryManager); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(KernelRegistryManager); // This list stores all kernel registries shared across sessions, including common ones and customized ones. std::list> kernel_registries_; diff --git a/onnxruntime/core/framework/mem_pattern.h b/onnxruntime/core/framework/mem_pattern.h index 916d37ec0815a..57d9e99360b13 100644 --- a/onnxruntime/core/framework/mem_pattern.h +++ b/onnxruntime/core/framework/mem_pattern.h @@ -44,17 +44,17 @@ class MemoryPattern { private: // allow move - ONNXRUNTIME_DISALLOW_COPY_AND_ASSIGNMENT(MemoryPattern); + ORT_DISALLOW_COPY_AND_ASSIGNMENT(MemoryPattern); std::unordered_map patterns_; size_t peak_size_{0}; }; struct MemoryPatternGroup { - std::vector locations; + std::vector locations; std::vector patterns; - const MemoryPattern* GetPatterns(const ONNXRuntimeAllocatorInfo& location) const { + const MemoryPattern* GetPatterns(const OrtAllocatorInfo& location) const { for (size_t i = 0; i < locations.size(); i++) if (locations[i] == location) { return &patterns[i]; diff --git a/onnxruntime/core/framework/ml_value_pattern_planner.cc b/onnxruntime/core/framework/ml_value_pattern_planner.cc index 5f765c1ede71b..f5da571f5d6f4 100644 --- a/onnxruntime/core/framework/ml_value_pattern_planner.cc +++ b/onnxruntime/core/framework/ml_value_pattern_planner.cc @@ -8,7 +8,7 @@ namespace onnxruntime { MLValuePatternPlanner::MLValuePatternPlanner(const SequentialExecutionPlan& execution_plan) : execution_planner_{execution_plan} { - std::set locations; + std::set locations; for (auto& alloc_plan : execution_planner_.allocation_plan) { if (locations.find(alloc_plan.location) == locations.end()) locations.insert(alloc_plan.location); diff --git a/onnxruntime/core/framework/ml_value_patterns_planner.h b/onnxruntime/core/framework/ml_value_patterns_planner.h index 250f68c45d7e8..17b886d651b16 100644 --- a/onnxruntime/core/framework/ml_value_patterns_planner.h +++ b/onnxruntime/core/framework/ml_value_patterns_planner.h @@ -54,10 +54,10 @@ class MLValuePatternPlanner { } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(MLValuePatternPlanner); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(MLValuePatternPlanner); mutable std::mutex lock_; - std::map planner_map_; + std::map 
planner_map_; std::vector > pattern_planners_; const SequentialExecutionPlan& execution_planner_; }; diff --git a/onnxruntime/core/framework/mlvalue_name_idx_map.h b/onnxruntime/core/framework/mlvalue_name_idx_map.h index 9dbc720e2be62..4fe95fce37c25 100644 --- a/onnxruntime/core/framework/mlvalue_name_idx_map.h +++ b/onnxruntime/core/framework/mlvalue_name_idx_map.h @@ -35,7 +35,7 @@ class MLValueNameIdxMap { auto it = map_.find(name); if (it == map_.end()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Could not find MLValue with name: ", name); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Could not find MLValue with name: ", name); } idx = it->second; @@ -49,7 +49,7 @@ class MLValueNameIdxMap { const_iterator end() const noexcept { return map_.cend(); } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(MLValueNameIdxMap); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(MLValueNameIdxMap); int mlvalue_max_idx_ = 0; std::unordered_map map_; diff --git a/onnxruntime/core/framework/mlvalue_tensor_slicer.cc b/onnxruntime/core/framework/mlvalue_tensor_slicer.cc index 294e95668f2ca..dd5e1460cf2e5 100644 --- a/onnxruntime/core/framework/mlvalue_tensor_slicer.cc +++ b/onnxruntime/core/framework/mlvalue_tensor_slicer.cc @@ -11,15 +11,15 @@ MLValueTensorSlicer MLValueTensorSlicer::Create(T& mlvalue, int64_t slice_ static_assert(std::is_same, MLValue>::value, "MLValueTensorSlicer can only be used with 'MLValue' or 'const MLValue'"); - ONNXRUNTIME_ENFORCE(mlvalue.IsTensor(), "Can't slice a non-tensor MLValue. Type was ", mlvalue.Type()); - ONNXRUNTIME_ENFORCE(mlvalue.IsAllocated(), "MLValue has not been allocated so can't be sliced."); + ORT_ENFORCE(mlvalue.IsTensor(), "Can't slice a non-tensor MLValue. Type was ", mlvalue.Type()); + ORT_ENFORCE(mlvalue.IsAllocated(), "MLValue has not been allocated so can't be sliced."); auto& tensor_shape{mlvalue.template Get().Shape()}; - ONNXRUNTIME_ENFORCE(gsl::narrow_cast(tensor_shape.NumDimensions()) >= slice_dimension, + ORT_ENFORCE(gsl::narrow_cast(tensor_shape.NumDimensions()) >= slice_dimension, "Insufficient dimensions to slice on ", slice_dimension, ". Shape:", tensor_shape); auto dim0_size = tensor_shape[0]; - ONNXRUNTIME_ENFORCE(dim0_offset < dim0_size, "Invalid dim0_offset of ", dim0_offset, ". Dimension 0 is ", dim0_size); + ORT_ENFORCE(dim0_offset < dim0_size, "Invalid dim0_offset of ", dim0_offset, ". Dimension 0 is ", dim0_size); return MLValueTensorSlicer{mlvalue, slice_dimension, dim0_offset}; }; diff --git a/onnxruntime/core/framework/mlvalue_tensor_slicer.h b/onnxruntime/core/framework/mlvalue_tensor_slicer.h index bf26ba95c7440..92e1ba6fb1a4b 100644 --- a/onnxruntime/core/framework/mlvalue_tensor_slicer.h +++ b/onnxruntime/core/framework/mlvalue_tensor_slicer.h @@ -77,7 +77,7 @@ class MLValueTensorSlicer { // const accessor is always enabled const_reference operator*() const { - ONNXRUNTIME_ENFORCE(position_ >= 0 && position_ < sequence_length_); + ORT_ENFORCE(position_ >= 0 && position_ < sequence_length_); if (position_ != position_materialized_) { MaterializeMLValue(); } @@ -87,7 +87,7 @@ class MLValueTensorSlicer { // non-const is only enabled if T is not const (i.e. 
is 'MLValue' not 'const MLValue') std::enable_if_t::value, reference> operator*() { - ONNXRUNTIME_ENFORCE(position_ >= 0 && position_ < sequence_length_); + ORT_ENFORCE(position_ >= 0 && position_ < sequence_length_); if (position_ != position_materialized_) { MaterializeMLValue(); } @@ -109,7 +109,7 @@ class MLValueTensorSlicer { const void* tensor_data_raw_; MLDataType tensor_data_type_; - const ONNXRuntimeAllocatorInfo* tensor_location_; + const OrtAllocatorInfo* tensor_location_; int64_t sequence_length_; TensorShape per_iteration_shape_; diff --git a/onnxruntime/core/framework/onnx_object.cc b/onnxruntime/core/framework/onnx_object.cc index 6cdb9cd0abb1d..d7232e477c8d7 100644 --- a/onnxruntime/core/framework/onnx_object.cc +++ b/onnxruntime/core/framework/onnx_object.cc @@ -4,11 +4,11 @@ #include "core/session/onnxruntime_c_api.h" #include -ONNXRUNTIME_API(uint32_t, ONNXRuntimeAddRefToObject, void* ptr) { +ORT_API(uint32_t, OrtAddRefToObject, void* ptr) { return (*static_cast(ptr))->AddRef(ptr); } -ONNXRUNTIME_API(uint32_t, ONNXRuntimeReleaseObject, void* ptr) { +ORT_API(uint32_t, OrtReleaseObject, void* ptr) { if (ptr == nullptr) return 0; return (*static_cast(ptr))->Release(ptr); } diff --git a/onnxruntime/core/framework/onnxruntime_typeinfo.cc b/onnxruntime/core/framework/onnxruntime_typeinfo.cc index 294f9faa60d4c..0693daa2f4ff1 100644 --- a/onnxruntime/core/framework/onnxruntime_typeinfo.cc +++ b/onnxruntime/core/framework/onnxruntime_typeinfo.cc @@ -13,43 +13,43 @@ using onnxruntime::MLFloat16; using onnxruntime::Tensor; using onnxruntime::TensorShape; -ONNXRuntimeTypeInfo::ONNXRuntimeTypeInfo(ONNXRuntimeType type1, void* data1) noexcept : type(type1), data(data1) { +OrtTypeInfo::OrtTypeInfo(OrtType type1, void* data1) noexcept : type(type1), data(data1) { } -ONNXRuntimeTypeInfo::~ONNXRuntimeTypeInfo() { +OrtTypeInfo::~OrtTypeInfo() { assert(ref_count == 0); - ONNXRuntimeReleaseObject(data); + OrtReleaseObject(data); } -ONNXRUNTIME_API(const struct ONNXRuntimeTensorTypeAndShapeInfo*, ONNXRuntimeCastTypeInfoToTensorInfo, _In_ struct ONNXRuntimeTypeInfo* input) { - return input->type == ONNXRUNTIME_TYPE_TENSOR ? reinterpret_cast(input->data) : nullptr; +ORT_API(const struct OrtTensorTypeAndShapeInfo*, OrtCastTypeInfoToTensorInfo, _In_ struct OrtTypeInfo* input) { + return input->type == ORT_TYPE_TENSOR ? 
reinterpret_cast(input->data) : nullptr; } -ONNXStatus* GetTensorShapeAndType(const TensorShape* shape, const onnxruntime::DataTypeImpl* tensor_data_type, ONNXRuntimeTensorTypeAndShapeInfo** out); +ONNXStatus* GetTensorShapeAndType(const TensorShape* shape, const onnxruntime::DataTypeImpl* tensor_data_type, OrtTensorTypeAndShapeInfo** out); -ONNXStatus* ONNXRuntimeTypeInfo::FromDataTypeImpl(const onnxruntime::DataTypeImpl* input, const TensorShape* shape, const onnxruntime::DataTypeImpl* tensor_data_type, ONNXRuntimeTypeInfo** out) { +ONNXStatus* OrtTypeInfo::FromDataTypeImpl(const onnxruntime::DataTypeImpl* input, const TensorShape* shape, const onnxruntime::DataTypeImpl* tensor_data_type, OrtTypeInfo** out) { if (input == nullptr) { - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_UNKNOWN, nullptr); + *out = new OrtTypeInfo(ORT_TYPE_UNKNOWN, nullptr); return nullptr; } if (input == DataTypeImpl::GetType()) { - ONNXRuntimeTensorTypeAndShapeInfo* info = nullptr; + OrtTensorTypeAndShapeInfo* info = nullptr; if (tensor_data_type != nullptr) { ONNXStatus* st = GetTensorShapeAndType(shape, tensor_data_type, &info); if (st != nullptr) return st; } - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_TENSOR, info); + *out = new OrtTypeInfo(ORT_TYPE_TENSOR, info); return nullptr; } if (input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType()) { - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_MAP, nullptr); + *out = new OrtTypeInfo(ORT_TYPE_MAP, nullptr); return nullptr; } if (input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType() || input == DataTypeImpl::GetType()) { - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_SEQUENCE, nullptr); + *out = new OrtTypeInfo(ORT_TYPE_SEQUENCE, nullptr); return nullptr; } - return CreateONNXStatus(ONNXRUNTIME_NOT_IMPLEMENTED, "not implemented"); + return CreateONNXStatus(ORT_NOT_IMPLEMENTED, "not implemented"); } const DataTypeImpl* ElementTypeFromProto(ONNX_NAMESPACE::TensorProto_DataType type) { @@ -81,16 +81,16 @@ const DataTypeImpl* ElementTypeFromProto(ONNX_NAMESPACE::TensorProto_DataType ty case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: return DataTypeImpl::GetType(); default: - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, ":tensor type ", type, " is not supported"); + ORT_NOT_IMPLEMENTED(__FUNCTION__, ":tensor type ", type, " is not supported"); } } -ONNXStatus* ONNXRuntimeTypeInfo::FromDataTypeImpl(const onnx::TypeProto* input, ONNXRuntimeTypeInfo** out) { +ONNXStatus* OrtTypeInfo::FromDataTypeImpl(const onnx::TypeProto* input, OrtTypeInfo** out) { if (input->has_tensor_type()) { const ::onnx::TypeProto_Tensor& onnx_tensor_info = input->tensor_type(); const DataTypeImpl* type = ElementTypeFromProto(onnx_tensor_info.elem_type()); ONNXStatus* st; - ONNXRuntimeTensorTypeAndShapeInfo* info = nullptr; + OrtTensorTypeAndShapeInfo* info = nullptr; if (onnx_tensor_info.has_shape()) { const ::onnx::TensorShapeProto& s = onnx_tensor_info.shape(); std::vector shape_data(s.dim_size()); @@ -104,24 +104,24 @@ ONNXStatus* ONNXRuntimeTypeInfo::FromDataTypeImpl(const onnx::TypeProto* input, } if (st != nullptr) return st; - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_TENSOR, info); + *out = new 
OrtTypeInfo(ORT_TYPE_TENSOR, info); return nullptr; } if (input->has_sequence_type()) { - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_SEQUENCE, nullptr); + *out = new OrtTypeInfo(ORT_TYPE_SEQUENCE, nullptr); return nullptr; } if (input->has_map_type()) { - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_MAP, nullptr); + *out = new OrtTypeInfo(ORT_TYPE_MAP, nullptr); return nullptr; } if (input->has_opaque_type()) { - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_OPAQUE, nullptr); + *out = new OrtTypeInfo(ORT_TYPE_OPAQUE, nullptr); return nullptr; } if (input->has_sparse_tensor_type()) { - *out = new ONNXRuntimeTypeInfo(ONNXRUNTIME_TYPE_SPARSETENSOR, nullptr); + *out = new OrtTypeInfo(ORT_TYPE_SPARSETENSOR, nullptr); return nullptr; } - return CreateONNXStatus(ONNXRUNTIME_NOT_IMPLEMENTED, "not implemented"); + return CreateONNXStatus(ORT_NOT_IMPLEMENTED, "not implemented"); } diff --git a/onnxruntime/core/framework/onnxruntime_typeinfo.h b/onnxruntime/core/framework/onnxruntime_typeinfo.h index b7477b2f4e0e4..7c2f5331bc76c 100644 --- a/onnxruntime/core/framework/onnxruntime_typeinfo.h +++ b/onnxruntime/core/framework/onnxruntime_typeinfo.h @@ -18,21 +18,21 @@ class TypeProto; * the equivalent of onnx::TypeProto * This class is mainly for the C API */ -struct ONNXRuntimeTypeInfo : public onnxruntime::ObjectBase { +struct OrtTypeInfo : public onnxruntime::ObjectBase { public: - friend class onnxruntime::ObjectBase; + friend class onnxruntime::ObjectBase; - ONNXRuntimeType type = ONNXRUNTIME_TYPE_UNKNOWN; + OrtType type = ORT_TYPE_UNKNOWN; //owned by this void* data = nullptr; - ONNXRuntimeTypeInfo(const ONNXRuntimeTypeInfo& other) = delete; - ONNXRuntimeTypeInfo& operator=(const ONNXRuntimeTypeInfo& other) = delete; + OrtTypeInfo(const OrtTypeInfo& other) = delete; + OrtTypeInfo& operator=(const OrtTypeInfo& other) = delete; static ONNXStatus* FromDataTypeImpl(const onnxruntime::DataTypeImpl* input, const onnxruntime::TensorShape* shape, - const onnxruntime::DataTypeImpl* tensor_data_type, ONNXRuntimeTypeInfo** out); - static ONNXStatus* FromDataTypeImpl(const onnx::TypeProto*, ONNXRuntimeTypeInfo** out); + const onnxruntime::DataTypeImpl* tensor_data_type, OrtTypeInfo** out); + static ONNXStatus* FromDataTypeImpl(const onnx::TypeProto*, OrtTypeInfo** out); private: - ONNXRuntimeTypeInfo(ONNXRuntimeType type, void* data) noexcept; - ~ONNXRuntimeTypeInfo(); + OrtTypeInfo(OrtType type, void* data) noexcept; + ~OrtTypeInfo(); }; diff --git a/onnxruntime/core/framework/op_kernel.cc b/onnxruntime/core/framework/op_kernel.cc index 3813688cc65d0..ad1a6d11498d1 100644 --- a/onnxruntime/core/framework/op_kernel.cc +++ b/onnxruntime/core/framework/op_kernel.cc @@ -15,8 +15,8 @@ OpKernelContext::OpKernelContext(ExecutionFrame* frame, : execution_frame_(frame), kernel_(kernel), logger_(&logger) { - ONNXRUNTIME_ENFORCE(frame != nullptr, "Execution frame was null"); - ONNXRUNTIME_ENFORCE(kernel != nullptr, "OpKernel was null"); + ORT_ENFORCE(frame != nullptr, "Execution frame was null"); + ORT_ENFORCE(kernel != nullptr, "OpKernel was null"); node_input_start_index_ = frame->GetFirstArgIndex(kernel->Node().Index()); node_implicit_input_start_index_ = node_input_start_index_ + InputCount(); @@ -37,20 +37,20 @@ Tensor* OpKernelContext::Output(int index, const TensorShape& shape) { //I believe it's a false alarm. 
MLValue* p_ml_value = nullptr; Status status = execution_frame_->GetOrCreateNodeOutputMLValue(GetOutputArgIndex(index), parameters, p_ml_value); - ONNXRUNTIME_ENFORCE(status.IsOK(), status.ErrorMessage()); + ORT_ENFORCE(status.IsOK(), status.ErrorMessage()); return p_ml_value ? p_ml_value->GetMutable() : nullptr; } int OpKernelContext::NumVariadicInputs(size_t arg_num) const { auto& arg_counts = kernel_->Node().InputArgCount(); - ONNXRUNTIME_ENFORCE(arg_num < arg_counts.size(), "Invalid arg_num of ", arg_num, ". Num args is ", arg_counts.size()); + ORT_ENFORCE(arg_num < arg_counts.size(), "Invalid arg_num of ", arg_num, ". Num args is ", arg_counts.size()); return arg_counts[arg_num]; } Status OpKernelContext::GetTempSpaceAllocator(AllocatorPtr* output) const { - *output = execution_frame_->GetAllocator(kernel_->Allocator(0, ONNXRuntimeMemTypeDefault)); + *output = execution_frame_->GetAllocator(kernel_->Allocator(0, OrtMemTypeDefault)); if (!*output) return Status(common::ONNXRUNTIME, common::FAIL, "TempSpace allocator not found"); return Status::OK(); @@ -98,7 +98,7 @@ Fence_t OpKernelContext::OutputFence(int index) const { Status OpKernelContext::GetOrCreateOutputMLValue(int index, MLValue*& p_value) { auto output_arg_index = GetOutputArgIndex(index); MLValueAllocationParameters parameters; - ONNXRUNTIME_ENFORCE(execution_frame_->GetOrCreateNodeOutputMLValue(output_arg_index, parameters, p_value).IsOK()); + ORT_ENFORCE(execution_frame_->GetOrCreateNodeOutputMLValue(output_arg_index, parameters, p_value).IsOK()); return Status::OK(); } diff --git a/onnxruntime/core/framework/op_kernel_info.cc b/onnxruntime/core/framework/op_kernel_info.cc index a63057869372d..204bea58f8f9d 100644 --- a/onnxruntime/core/framework/op_kernel_info.cc +++ b/onnxruntime/core/framework/op_kernel_info.cc @@ -21,9 +21,9 @@ OpKernelInfo::OpKernelInfo(const onnxruntime::Node& node, OpKernelInfo::OpKernelInfo(const OpKernelInfo& other) : OpKernelInfo(other.node_, other.kernel_def_, *other.execution_provider_, other.session_state_) {} -const ONNXRuntimeAllocatorInfo& OpKernelInfo::GetAllocatorInfo(int device_id, ONNXRuntimeMemType mem_type) const { +const OrtAllocatorInfo& OpKernelInfo::GetAllocatorInfo(int device_id, OrtMemType mem_type) const { AllocatorPtr alloc = execution_provider_->GetAllocator(device_id, mem_type); - if (alloc == nullptr) ONNXRUNTIME_THROW("cannot find allocator"); + if (alloc == nullptr) ORT_THROW("cannot find allocator"); return alloc->Info(); } diff --git a/onnxruntime/core/framework/op_node_proto_helper.cc b/onnxruntime/core/framework/op_node_proto_helper.cc index e85f27f63627d..e2f9d38b70a3e 100644 --- a/onnxruntime/core/framework/op_node_proto_helper.cc +++ b/onnxruntime/core/framework/op_node_proto_helper.cc @@ -14,14 +14,14 @@ using namespace ONNX_NAMESPACE; using namespace ::onnxruntime::common; namespace onnxruntime { -#define ONNXRUNTIME_DEFINE_GET_ATTR(IMPL_T, T, type) \ +#define ORT_DEFINE_GET_ATTR(IMPL_T, T, type) \ template <> \ template <> \ Status OpNodeProtoHelper::GetAttr( \ const std::string& name, T* value) const { \ const AttributeProto* attr = TryGetAttribute(name); \ if (!attr) { \ - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "No attribute with name:'", name, "'is defined."); \ + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "No attribute with name:'", name, "'is defined."); \ } \ if (!attr->has_##type()) { \ return Status(ONNXRUNTIME, FAIL, "Attibute name and type don't match"); \ @@ -31,7 +31,7 @@ namespace onnxruntime { } \ } -#define 
ONNXRUNTIME_DEFINE_GET_ATTRS(IMPL_T, T, list) \ +#define ORT_DEFINE_GET_ATTRS(IMPL_T, T, list) \ template <> \ template <> \ Status OpNodeProtoHelper::GetAttrs( \ @@ -54,31 +54,31 @@ namespace onnxruntime { if (!attr) { \ return Status(ONNXRUNTIME, FAIL, "No attribute with this name is defined."); \ } \ - ONNXRUNTIME_ENFORCE(values.size() == attr->list##_size()); \ + ORT_ENFORCE(values.size() == attr->list##_size()); \ for (int i = 0; i < attr->list##_size(); ++i) { \ values[i] = static_cast(attr->list(i)); \ } \ return Status::OK(); \ } -#define ONNXRUNTIME_DEFINE_GET_ATTR_SPECIALIZATIONS(type, list) \ - ONNXRUNTIME_DEFINE_GET_ATTR(ProtoHelperNodeContext, type, list) \ - ONNXRUNTIME_DEFINE_GET_ATTR(InferenceContext, type, list) - -#define ONNXRUNTIME_DEFINE_GET_ATTRS_SPECIALIZATIONS(type, list) \ - ONNXRUNTIME_DEFINE_GET_ATTRS(ProtoHelperNodeContext, type, list) \ - ONNXRUNTIME_DEFINE_GET_ATTRS(InferenceContext, type, list) - -ONNXRUNTIME_DEFINE_GET_ATTR_SPECIALIZATIONS(float, f) -ONNXRUNTIME_DEFINE_GET_ATTR_SPECIALIZATIONS(int64_t, i) -ONNXRUNTIME_DEFINE_GET_ATTR_SPECIALIZATIONS(std::string, s) -ONNXRUNTIME_DEFINE_GET_ATTR_SPECIALIZATIONS(TensorProto, t) -ONNXRUNTIME_DEFINE_GET_ATTR_SPECIALIZATIONS(GraphProto, g) -ONNXRUNTIME_DEFINE_GET_ATTRS_SPECIALIZATIONS(float, floats) -ONNXRUNTIME_DEFINE_GET_ATTRS_SPECIALIZATIONS(int64_t, ints) -ONNXRUNTIME_DEFINE_GET_ATTRS_SPECIALIZATIONS(std::string, strings) -ONNXRUNTIME_DEFINE_GET_ATTRS_SPECIALIZATIONS(TensorProto, tensors) -ONNXRUNTIME_DEFINE_GET_ATTRS_SPECIALIZATIONS(GraphProto, graphs) +#define ORT_DEFINE_GET_ATTR_SPECIALIZATIONS(type, list) \ + ORT_DEFINE_GET_ATTR(ProtoHelperNodeContext, type, list) \ + ORT_DEFINE_GET_ATTR(InferenceContext, type, list) + +#define ORT_DEFINE_GET_ATTRS_SPECIALIZATIONS(type, list) \ + ORT_DEFINE_GET_ATTRS(ProtoHelperNodeContext, type, list) \ + ORT_DEFINE_GET_ATTRS(InferenceContext, type, list) + +ORT_DEFINE_GET_ATTR_SPECIALIZATIONS(float, f) +ORT_DEFINE_GET_ATTR_SPECIALIZATIONS(int64_t, i) +ORT_DEFINE_GET_ATTR_SPECIALIZATIONS(std::string, s) +ORT_DEFINE_GET_ATTR_SPECIALIZATIONS(TensorProto, t) +ORT_DEFINE_GET_ATTR_SPECIALIZATIONS(GraphProto, g) +ORT_DEFINE_GET_ATTRS_SPECIALIZATIONS(float, floats) +ORT_DEFINE_GET_ATTRS_SPECIALIZATIONS(int64_t, ints) +ORT_DEFINE_GET_ATTRS_SPECIALIZATIONS(std::string, strings) +ORT_DEFINE_GET_ATTRS_SPECIALIZATIONS(TensorProto, tensors) +ORT_DEFINE_GET_ATTRS_SPECIALIZATIONS(GraphProto, graphs) size_t ProtoHelperNodeContext::getNumInputs() const { return node_.InputDefs().size(); diff --git a/onnxruntime/core/framework/parallel_executor.cc b/onnxruntime/core/framework/parallel_executor.cc index 72d416947c0a4..98b8495a4d704 100644 --- a/onnxruntime/core/framework/parallel_executor.cc +++ b/onnxruntime/core/framework/parallel_executor.cc @@ -51,7 +51,7 @@ Status ParallelExecutor::Execute(const SessionState& session_state, } VLOGS(logger, 1) << "Fetching output."; - ONNXRUNTIME_RETURN_IF_ERROR(FetchOutput(session_state.GetMLValueNameIdxMap(), *root_frame_, output_names, fetches, logger)); + ORT_RETURN_IF_ERROR(FetchOutput(session_state.GetMLValueNameIdxMap(), *root_frame_, output_names, fetches, logger)); if (root_frame_->HasPlan()) { std::vector input_shapes; @@ -67,8 +67,8 @@ Status ParallelExecutor::Execute(const SessionState& session_state, if (all_tensors) { auto mem_patterns = std::make_unique(); - ONNXRUNTIME_RETURN_IF_ERROR(root_frame_->GeneratePatterns(mem_patterns.get())); - ONNXRUNTIME_RETURN_IF_ERROR(session_state.UpdateMemoryPatternGroupCache(input_shapes, 
std::move(mem_patterns))); + ORT_RETURN_IF_ERROR(root_frame_->GeneratePatterns(mem_patterns.get())); + ORT_RETURN_IF_ERROR(session_state.UpdateMemoryPatternGroupCache(input_shapes, std::move(mem_patterns))); } } @@ -101,14 +101,14 @@ void ParallelExecutor::RunNodeAsyncInternal(size_t p_node_index, // to also handle exception propagation if (terminate_flag_) { LOGS(logger, WARNING) << "Exiting due to terminate flag being set to true."; - ONNXRUNTIME_THROW("Exiting due to terminate flag being set to true."); + ORT_THROW("Exiting due to terminate flag being set to true."); } auto p_op_kernel = session_state.GetKernel(node_index); // if a kernel has been added in the session state, it better be NON-null. if (p_op_kernel == nullptr) { - ONNXRUNTIME_THROW("Got nullptr from GetKernel for node: ", + ORT_THROW("Got nullptr from GetKernel for node: ", graph_viewer->GetNode(node_index)->Name()); } @@ -157,7 +157,7 @@ void ParallelExecutor::RunNodeAsyncInternal(size_t p_node_index, // Execute the kernel. auto status = p_op_kernel->Compute(&op_kernel_context); if (!status.IsOK()) { - ONNXRUNTIME_THROW("Compute failed for node: ", graph_viewer->GetNode(node_index)->Name()); + ORT_THROW("Compute failed for node: ", graph_viewer->GetNode(node_index)->Name()); } session_state.Profiler().EndTimeAndRecordEvent(profiling::NODE_EVENT, @@ -240,7 +240,7 @@ Status ParallelExecutor::FetchOutput(const MLValueNameIdxMap& name_idx_map, fetches.resize(output_names.size()); } else { // this should've been checked before already - ONNXRUNTIME_ENFORCE(output_names.size() == fetches.size(), + ORT_ENFORCE(output_names.size() == fetches.size(), "output_names vector size: " + std::to_string(output_names.size()) + " does not match that of fetches vector: " + std::to_string(fetches.size())); } @@ -250,7 +250,7 @@ Status ParallelExecutor::FetchOutput(const MLValueNameIdxMap& name_idx_map, for (const auto& oname : output_names) { VLOGS(logger, 1) << "Attempting to fetch output with name: " << oname; int mlvalue_index; - ONNXRUNTIME_RETURN_IF_ERROR(name_idx_map.GetIdx(oname, mlvalue_index)); + ORT_RETURN_IF_ERROR(name_idx_map.GetIdx(oname, mlvalue_index)); const MLValue& output_mlvalue = frame.GetMLValue(mlvalue_index); VLOGS(logger, 1) << "Copying fetched MLValue to output vector"; fetches[idx++] = output_mlvalue; diff --git a/onnxruntime/core/framework/parallel_executor.h b/onnxruntime/core/framework/parallel_executor.h index 5d4aa551a073f..8d85b905f8aef 100644 --- a/onnxruntime/core/framework/parallel_executor.h +++ b/onnxruntime/core/framework/parallel_executor.h @@ -30,7 +30,7 @@ class ParallelExecutor : public IExecutor { const logging::Logger& logger) override; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ParallelExecutor); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ParallelExecutor); void RunNodeAsync(size_t p_node_index, const SessionState& session_state, const logging::Logger& logger); void RunNodeAsyncInternal(size_t p_node_index, const SessionState& session_state, const logging::Logger& logger); diff --git a/onnxruntime/core/framework/run_options.cc b/onnxruntime/core/framework/run_options.cc index 4462f92b7aeca..614a504330485 100644 --- a/onnxruntime/core/framework/run_options.cc +++ b/onnxruntime/core/framework/run_options.cc @@ -6,29 +6,29 @@ #include #include -ONNXRUNTIME_API(ONNXRuntimeRunOptions*, ONNXRuntimeCreateRunOptions) { - std::unique_ptr options = std::make_unique(); +ORT_API(OrtRunOptions*, OrtCreateRunOptions) { + std::unique_ptr options = std::make_unique(); return options.release(); } 
-ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeRunOptionsSetRunLogVerbosityLevel, _In_ ONNXRuntimeRunOptions* options, unsigned int value) {
+ORT_API_STATUS_IMPL(OrtRunOptionsSetRunLogVerbosityLevel, _In_ OrtRunOptions* options, unsigned int value) {
options->run_log_verbosity_level = value; return nullptr; }
-ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeRunOptionsSetRunTag, _In_ ONNXRuntimeRunOptions* options, _In_ const char* run_tag) {
+ORT_API_STATUS_IMPL(OrtRunOptionsSetRunTag, _In_ OrtRunOptions* options, _In_ const char* run_tag) {
if (run_tag) options->run_tag = run_tag; return nullptr; }
-ONNXRUNTIME_API(unsigned int, ONNXRuntimeRunOptionsGetRunLogVerbosityLevel, _In_ ONNXRuntimeRunOptions* options) {
+ORT_API(unsigned int, OrtRunOptionsGetRunLogVerbosityLevel, _In_ OrtRunOptions* options) {
return options->run_log_verbosity_level; }
-ONNXRUNTIME_API(const char*, ONNXRuntimeRunOptionsGetRunTag, _In_ ONNXRuntimeRunOptions* options) {
+ORT_API(const char*, OrtRunOptionsGetRunTag, _In_ OrtRunOptions* options) {
return options->run_tag.c_str(); }
-ONNXRUNTIME_API(void, ONNXRuntimeRunOptionsSetTerminate, _In_ ONNXRuntimeRunOptions* options, bool value) {
+ORT_API(void, OrtRunOptionsSetTerminate, _In_ OrtRunOptions* options, bool value) {
options->terminate = value; }
diff --git a/onnxruntime/core/framework/sequential_execution_plan.h b/onnxruntime/core/framework/sequential_execution_plan.h
index a3c65f33ac8a8..494c39820abe3 100644
--- a/onnxruntime/core/framework/sequential_execution_plan.h
+++ b/onnxruntime/core/framework/sequential_execution_plan.h
@@ -29,7 +29,7 @@ struct SequentialExecutionPlan {
struct AllocPlanPerValue { AllocKind alloc_kind{AllocKind::kAllocate}; MLDataType value_type{nullptr};
- ONNXRuntimeAllocatorInfo location;
+ OrtAllocatorInfo location;
// reused_buffer is valid only if alloc_kind == kReuse. It indicates // which MLValue's buffer must be reused for this MLValue. MLValueIndex reused_buffer{0};
@@ -38,7 +38,7 @@ struct SequentialExecutionPlan {
bool create_fence_if_async{false}; public:
- AllocPlanPerValue() : location(CPU, ONNXRuntimeArenaAllocator) {}
+ AllocPlanPerValue() : location(CPU, OrtArenaAllocator) {}
}; // The following vector is indexed by MLValueIndex
diff --git a/onnxruntime/core/framework/sequential_executor.cc b/onnxruntime/core/framework/sequential_executor.cc
index f08380af33c65..a5d704a0b0d69 100644
--- a/onnxruntime/core/framework/sequential_executor.cc
+++ b/onnxruntime/core/framework/sequential_executor.cc
@@ -46,7 +46,7 @@ Status SequentialExecutor::Execute(const SessionState& session_state,
for (const auto& node_exec_plan : exec_plan_vec) { if (terminate_flag_) { LOGS(logger, WARNING) << "Exiting due to terminate flag being set to true.";
- return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Exiting due to terminate flag being set to true.");
+ return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Exiting due to terminate flag being set to true.");
} auto node_index = node_exec_plan.node_index;
@@ -54,7 +54,7 @@ Status SequentialExecutor::Execute(const SessionState& session_state,
// if a kernel has been added in the session state, it better be NON-null.
if (p_op_kernel == nullptr) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Got nullptr from GetKernel for node: ", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Got nullptr from GetKernel for node: ", session_state.GetGraphViewer()->GetNode(node_index)->Name()); const std::string& node_name = p_op_kernel->Node().Name(); @@ -98,7 +98,7 @@ Status SequentialExecutor::Execute(const SessionState& session_state, VLOGS(logger, 1) << "Computing kernel: " << p_op_kernel->Node().Name(); auto kernel_begin_time = session_state.Profiler().StartTime(); - ONNXRUNTIME_RETURN_IF_ERROR(p_op_kernel->Compute(&op_kernel_context)); + ORT_RETURN_IF_ERROR(p_op_kernel->Compute(&op_kernel_context)); session_state.Profiler().EndTimeAndRecordEvent(profiling::NODE_EVENT, node_name + "_kernel_time", kernel_begin_time, @@ -133,11 +133,11 @@ Status SequentialExecutor::Execute(const SessionState& session_state, // free ml-values corresponding to this node VLOGS(logger, 1) << "Releasing node ML values after computing kernel: " << p_op_kernel->Node().Name(); - ONNXRUNTIME_RETURN_IF_ERROR(ReleaseNodeMLValues(frame, seq_exec_plan, node_exec_plan, logger)); + ORT_RETURN_IF_ERROR(ReleaseNodeMLValues(frame, seq_exec_plan, node_exec_plan, logger)); } VLOGS(logger, 1) << "Fetching output."; - ONNXRUNTIME_RETURN_IF_ERROR(FetchOutput(session_state.GetMLValueNameIdxMap(), frame, output_names, fetches, logger)); + ORT_RETURN_IF_ERROR(FetchOutput(session_state.GetMLValueNameIdxMap(), frame, output_names, fetches, logger)); if (frame.HasPlan()) { std::vector input_shapes; @@ -153,8 +153,8 @@ Status SequentialExecutor::Execute(const SessionState& session_state, if (all_tensors) { auto mem_patterns = std::make_unique(); - ONNXRUNTIME_RETURN_IF_ERROR(frame.GeneratePatterns(mem_patterns.get())); - ONNXRUNTIME_RETURN_IF_ERROR(session_state.UpdateMemoryPatternGroupCache(input_shapes, std::move(mem_patterns))); + ORT_RETURN_IF_ERROR(frame.GeneratePatterns(mem_patterns.get())); + ORT_RETURN_IF_ERROR(session_state.UpdateMemoryPatternGroupCache(input_shapes, std::move(mem_patterns))); } } @@ -171,7 +171,7 @@ static Status FetchOutput(const MLValueNameIdxMap& name_idx_map, fetches.resize(output_names.size()); } else { // this should've been checked before already - ONNXRUNTIME_ENFORCE(output_names.size() == fetches.size(), + ORT_ENFORCE(output_names.size() == fetches.size(), "output_names vector size: " + std::to_string(output_names.size()) + " does not match that of fetches vector: " + std::to_string(fetches.size())); } @@ -181,7 +181,7 @@ static Status FetchOutput(const MLValueNameIdxMap& name_idx_map, for (const auto& oname : output_names) { VLOGS(logger, 1) << "Attempting to fetch output with name: " << oname; int mlvalue_index; - ONNXRUNTIME_RETURN_IF_ERROR(name_idx_map.GetIdx(oname, mlvalue_index)); + ORT_RETURN_IF_ERROR(name_idx_map.GetIdx(oname, mlvalue_index)); const MLValue& output_mlvalue = frame.GetMLValue(mlvalue_index); VLOGS(logger, 1) << "Copying fetched MLValue to output vector"; fetches[idx++] = output_mlvalue; @@ -198,7 +198,7 @@ static Status ReleaseNodeMLValues(ExecutionFrame& frame, for (auto i = node_exec_plan.free_from_index; i <= node_exec_plan.free_to_index; ++i) { auto mlvalue_idx = seq_exec_plan.to_be_freed[i]; VLOGS(logger, 1) << "Releasing mlvalue with index: " << mlvalue_idx; - ONNXRUNTIME_RETURN_IF_ERROR(frame.ReleaseMLValue(mlvalue_idx)); + ORT_RETURN_IF_ERROR(frame.ReleaseMLValue(mlvalue_idx)); } return Status::OK(); diff --git a/onnxruntime/core/framework/sequential_executor.h 
b/onnxruntime/core/framework/sequential_executor.h index fd73a91eea04a..e58d34ec7d392 100644 --- a/onnxruntime/core/framework/sequential_executor.h +++ b/onnxruntime/core/framework/sequential_executor.h @@ -25,7 +25,7 @@ class SequentialExecutor : public IExecutor { const logging::Logger& logger) override; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(SequentialExecutor); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(SequentialExecutor); const bool& terminate_flag_; }; } // namespace onnxruntime diff --git a/onnxruntime/core/framework/session_state.cc b/onnxruntime/core/framework/session_state.cc index d126fe5e5995a..a3198e1ded36a 100644 --- a/onnxruntime/core/framework/session_state.cc +++ b/onnxruntime/core/framework/session_state.cc @@ -12,7 +12,7 @@ using namespace ::onnxruntime::common; namespace onnxruntime { void SessionState::SetGraphViewer(std::unique_ptr graph_viewer) { - ONNXRUNTIME_ENFORCE(nullptr != graph_viewer); + ORT_ENFORCE(nullptr != graph_viewer); graph_viewer_ = std::move(graph_viewer); } @@ -42,7 +42,7 @@ const SequentialExecutionPlan* SessionState::GetExecutionPlan() const { } void SessionState::AddInitializedTensor(int mlvalue_index, const MLValue& mlvalue) { - ONNXRUNTIME_ENFORCE(mlvalue_index >= 0 && mlvalue_index <= mlvalue_name_idx_map_.MaxIdx()); + ORT_ENFORCE(mlvalue_index >= 0 && mlvalue_index <= mlvalue_name_idx_map_.MaxIdx()); initialized_tensors_.insert({mlvalue_index, mlvalue}); } @@ -138,10 +138,10 @@ void SessionState::AddSubgraphSessionState(onnxruntime::NodeIndex index, const SessionState& session_state) { auto entry = subgraph_session_states_.find(index); - // make sure this is new. internal logic error if it is not so using ONNXRUNTIME_ENFORCE. + // make sure this is new. internal logic error if it is not so using ORT_ENFORCE. 
if (entry != subgraph_session_states_.cend()) { const auto& existing_entries = entry->second; - ONNXRUNTIME_ENFORCE(existing_entries.find(attribute_name) == existing_entries.cend(), + ORT_ENFORCE(existing_entries.find(attribute_name) == existing_entries.cend(), "Entry exists in node ", index, " for attribute ", attribute_name); } diff --git a/onnxruntime/core/framework/session_state.h b/onnxruntime/core/framework/session_state.h index b700bb671d17e..74fb8ab23d1e8 100644 --- a/onnxruntime/core/framework/session_state.h +++ b/onnxruntime/core/framework/session_state.h @@ -149,7 +149,7 @@ class SessionState { void SetThreadPool(TaskThreadPool* p_pool) { thread_pool_ = p_pool; } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(SessionState); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(SessionState); // cache of the constructed kernels to avoid spending construction // time per executor diff --git a/onnxruntime/core/framework/session_state_initializer.cc b/onnxruntime/core/framework/session_state_initializer.cc index b83cdb7341335..6d8df8500ed80 100644 --- a/onnxruntime/core/framework/session_state_initializer.cc +++ b/onnxruntime/core/framework/session_state_initializer.cc @@ -43,7 +43,7 @@ static common::Status SaveInitializedTensors(const onnxruntime::Graph& graph, const SequentialExecutionPlan& execution_plan, const ExecutionProviders& exec_providers, const MLValueNameIdxMap& mlvalue_name_idx_map, - std::map& weights_buffers, + std::map& weights_buffers, const SaveTensorFunc& save_tensor_func, const logging::Logger& logger); @@ -72,9 +72,9 @@ common::Status SessionStateInitializer::CreatePlan(const onnxruntime::GraphTrans const InsertCastTransformer& insert_cast_transformer, const std::vector& outer_scope_node_args, bool enable_sequential_execution) { - ONNXRUNTIME_RETURN_IF_ERROR(TransformGraph(graph_, graph_transformation_manager, - execution_providers_, kernel_registry_manager_, - insert_cast_transformer)); + ORT_RETURN_IF_ERROR(TransformGraph(graph_, graph_transformation_manager, + execution_providers_, kernel_registry_manager_, + insert_cast_transformer)); // After transformation/partitioning, the graph now is fixed and graph viewer is created and set for execution. session_state_.SetGraphViewer(std::make_unique(graph_)); @@ -82,7 +82,7 @@ common::Status SessionStateInitializer::CreatePlan(const onnxruntime::GraphTrans auto& mlvalue_name_idx_map = session_state_.GetMLValueNameIdxMap(); // populate the SessionState MLValueNameIdxMap - ONNXRUNTIME_RETURN_IF_ERROR(SaveMLValueNameIndexMapping(graph_, mlvalue_name_idx_map, logger_)); + ORT_RETURN_IF_ERROR(SaveMLValueNameIndexMapping(graph_, mlvalue_name_idx_map, logger_)); // remove any outer scope args we don't know about. this can happen if a node contains multiple subgraphs. std::vector valid_outer_scope_node_args; @@ -99,7 +99,7 @@ common::Status SessionStateInitializer::CreatePlan(const onnxruntime::GraphTrans if (enable_sequential_execution) { // CreatePlan will create a new SequentialExecutionPlan instance that we will // save into the session state. - ONNXRUNTIME_RETURN_IF_ERROR( + ORT_RETURN_IF_ERROR( SequentialPlanner::CreatePlan(graph_, valid_outer_scope_node_args, execution_providers_, kernel_registry_manager_, mlvalue_name_idx_map, exec_plan)); @@ -107,7 +107,7 @@ common::Status SessionStateInitializer::CreatePlan(const onnxruntime::GraphTrans } else { // Parallel execution still uses same allocation plan, but has limitation of memory buffer reuse. 
SequentialPlannerContext context(true /* enable parallel execution */); - ONNXRUNTIME_RETURN_IF_ERROR( + ORT_RETURN_IF_ERROR( SequentialPlanner::CreatePlan(graph_, valid_outer_scope_node_args, execution_providers_, kernel_registry_manager_, mlvalue_name_idx_map, context, exec_plan)); @@ -118,9 +118,9 @@ common::Status SessionStateInitializer::CreatePlan(const onnxruntime::GraphTrans } common::Status SessionStateInitializer::InitializeAndSave(bool enable_memory_pattern, - std::map& weights_buffers) { + std::map& weights_buffers) { const auto* exec_plan_ptr = session_state_.GetExecutionPlan(); - ONNXRUNTIME_ENFORCE(exec_plan_ptr, "Execution plan was not found in SessionState. CreatePlan must be called first."); + ORT_ENFORCE(exec_plan_ptr, "Execution plan was not found in SessionState. CreatePlan must be called first."); const auto& exec_plan{*exec_plan_ptr}; const auto& mlvalue_name_idx_map{session_state_.GetMLValueNameIdxMap()}; @@ -130,14 +130,14 @@ common::Status SessionStateInitializer::InitializeAndSave(bool enable_memory_pat session_state_.AddInitializedTensor(idx, value); }; - ONNXRUNTIME_RETURN_IF_ERROR(SaveInitializedTensors(graph_, enable_memory_pattern, exec_plan, - execution_providers_, mlvalue_name_idx_map, weights_buffers, - add_initialized_tensor, logger_)); + ORT_RETURN_IF_ERROR(SaveInitializedTensors(graph_, enable_memory_pattern, exec_plan, + execution_providers_, mlvalue_name_idx_map, weights_buffers, + add_initialized_tensor, logger_)); graph_.CleanAllInitializedTensors(); // remove weights from the graph now to save memory - ONNXRUNTIME_RETURN_IF_ERROR(SaveKernels(execution_providers_, session_state_, kernel_registry_manager_, logger_)); - ONNXRUNTIME_RETURN_IF_ERROR(SaveInputOutputNamesToNodeMapping(graph_, kernel_registry_manager_, session_state_)); + ORT_RETURN_IF_ERROR(SaveKernels(execution_providers_, session_state_, kernel_registry_manager_, logger_)); + ORT_RETURN_IF_ERROR(SaveInputOutputNamesToNodeMapping(graph_, kernel_registry_manager_, session_state_)); return Status::OK(); } @@ -155,13 +155,13 @@ common::Status TransformGraph(onnxruntime::Graph& graph, // 5. insert cast nodes. // first apply the default/system/basic graph to graph optimizations. - ONNXRUNTIME_RETURN_IF_ERROR(graph_transformer_mgr.ApplyAll(graph)); + ORT_RETURN_IF_ERROR(graph_transformer_mgr.ApplyAll(graph)); auto kernels{kernel_registry_manager.GetAllKernelRegistries()}; // Do partitioning based on execution providers' capability. GraphPartitioner partitioner(kernel_registry_manager, providers); - ONNXRUNTIME_RETURN_IF_ERROR(partitioner.Partition(graph)); + ORT_RETURN_IF_ERROR(partitioner.Partition(graph)); // Insert copy nodes. for (auto& provider : providers) { @@ -175,9 +175,9 @@ common::Status TransformGraph(onnxruntime::Graph& graph, // Insert cast node/s. 
bool modified = false; - ONNXRUNTIME_RETURN_IF_ERROR(insert_cast_transformer.Apply(graph, modified)); + ORT_RETURN_IF_ERROR(insert_cast_transformer.Apply(graph, modified)); - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); return common::Status::OK(); } @@ -237,7 +237,7 @@ common::Status SaveMLValueNameIndexMapping(const onnxruntime::Graph& graph, } common::Status DeserializeTensorProto(const ONNX_NAMESPACE::TensorProto& tensor_proto, - const ONNXRuntimeAllocatorInfo& alloc_info, + const OrtAllocatorInfo& alloc_info, const ExecutionProviders& exec_providers, MLValue& mlvalue, void* preallocated, size_t preallocated_size) { auto alloc_ptr = utils::GetAllocator(exec_providers, alloc_info); @@ -245,7 +245,7 @@ common::Status DeserializeTensorProto(const ONNX_NAMESPACE::TensorProto& tensor_ return Status(common::ONNXRUNTIME, common::FAIL, "Failed to get allocator for alloc_info: " + alloc_info.ToString()); } - if (strcmp(alloc_info.name, CPU) == 0 || alloc_info.mem_type == ONNXRuntimeMemTypeCPUOutput) { + if (strcmp(alloc_info.name, CPU) == 0 || alloc_info.mem_type == OrtMemTypeCPUOutput) { // deserialize directly to CPU tensor return utils::TensorProtoToMLValue(tensor_proto, alloc_ptr, preallocated, preallocated_size, mlvalue); } @@ -254,11 +254,11 @@ common::Status DeserializeTensorProto(const ONNX_NAMESPACE::TensorProto& tensor_ // deserialize to CPU first for non-CPU allocator, then alloc and copy AllocatorPtr deserialize_alloc_ptr; std::unique_ptr p_deserialize_tensor; - deserialize_alloc_ptr = exec_providers.Get(kCpuExecutionProvider)->GetAllocator(0, ONNXRuntimeMemTypeDefault); - ONNXRUNTIME_RETURN_IF_ERROR(utils::GetTensorFromTensorProto(tensor_proto, &p_deserialize_tensor, - deserialize_alloc_ptr)); + deserialize_alloc_ptr = exec_providers.Get(kCpuExecutionProvider)->GetAllocator(0, OrtMemTypeDefault); + ORT_RETURN_IF_ERROR(utils::GetTensorFromTensorProto(tensor_proto, &p_deserialize_tensor, + deserialize_alloc_ptr)); const IExecutionProvider* provider = exec_providers.Get(alloc_info); - ONNXRUNTIME_ENFORCE(provider != nullptr); + ORT_ENFORCE(provider != nullptr); p_tensor = std::make_unique( p_deserialize_tensor->DataType(), p_deserialize_tensor->Shape(), @@ -286,7 +286,7 @@ common::Status DeserializeTensorProto(const ONNX_NAMESPACE::TensorProto& tensor_ static common::Status PlanTensor(MLValuePatternPlanner& planner, const MLValueNameIdxMap& mlvalue_name_idx_map, const std::string& name, const ONNX_NAMESPACE::TensorProto& tensor_proto) { int mlvalue_index; - ONNXRUNTIME_RETURN_IF_ERROR(mlvalue_name_idx_map.GetIdx(name, mlvalue_index)); + ORT_RETURN_IF_ERROR(mlvalue_name_idx_map.GetIdx(name, mlvalue_index)); size_t len; Status st = utils::GetSizeInBytesFromTensorProto<256>(tensor_proto, &len); if (st.Code() == common::NOT_IMPLEMENTED) return Status::OK(); @@ -298,12 +298,12 @@ common::Status SaveInitializedTensorsWithMemPattern(const Graph& graph, const SequentialExecutionPlan& execution_plan, const ExecutionProviders& exec_providers, const MLValueNameIdxMap& mlvalue_name_idx_map, - std::map& weights_buffers, + std::map& weights_buffers, const SaveTensorFunc& save_tensor_func, const logging::Logger& logger) { LOGS(logger, INFO) << "Saving initialized tensors."; - ONNXRUNTIME_ENFORCE(mlvalue_name_idx_map.MaxIdx() > 0, "MLValue indexes should have been populated."); + ORT_ENFORCE(mlvalue_name_idx_map.MaxIdx() > 0, "MLValue indexes should have been populated."); MLValuePatternPlanner planner(execution_plan); @@ -311,16 +311,16 @@ common::Status 
SaveInitializedTensorsWithMemPattern(const Graph& graph, const onnxruntime::InitializedTensorSet& initialized_tensor_set = graph.GetAllInitializedTensors(); for (const auto& entry : initialized_tensor_set) { //string/complex64/complex128 tensors will be skipped - ONNXRUNTIME_RETURN_IF_ERROR(PlanTensor(planner, mlvalue_name_idx_map, entry.first, *entry.second)); + ORT_RETURN_IF_ERROR(PlanTensor(planner, mlvalue_name_idx_map, entry.first, *entry.second)); } //2. allocate weight buffer on different locations MemoryPatternGroup mem_patterns; - ONNXRUNTIME_RETURN_IF_ERROR(planner.GeneratePatterns(&mem_patterns)); + ORT_RETURN_IF_ERROR(planner.GeneratePatterns(&mem_patterns)); for (size_t i = 0; i < mem_patterns.locations.size(); i++) { auto& location = mem_patterns.locations[i]; - ONNXRUNTIME_ENFORCE(weights_buffers.find(location) == weights_buffers.end(), - "Existing entry in weights buffer for ", location.name); + ORT_ENFORCE(weights_buffers.find(location) == weights_buffers.end(), + "Existing entry in weights buffer for ", location.name); auto alloc = utils::GetAllocator(exec_providers, location); if (!alloc) @@ -335,7 +335,7 @@ common::Status SaveInitializedTensorsWithMemPattern(const Graph& graph, for (const auto& entry : initialized_tensor_set) { const std::string& name = entry.first; int mlvalue_index; - ONNXRUNTIME_RETURN_IF_ERROR(mlvalue_name_idx_map.GetIdx(name, mlvalue_index)); + ORT_RETURN_IF_ERROR(mlvalue_name_idx_map.GetIdx(name, mlvalue_index)); const ONNX_NAMESPACE::TensorProto& tensor_proto = *(entry.second); auto& location = execution_plan.allocation_plan[mlvalue_index].location; @@ -385,17 +385,17 @@ common::Status SaveInitializedTensorsWithSeperateBuffer(const onnxruntime::Graph const logging::Logger& logger) { LOGS(logger, INFO) << "Saving initialized tensors."; - ONNXRUNTIME_ENFORCE(mlvalue_name_idx_map.MaxIdx() > 0, "MLValue indexes should have been populated."); + ORT_ENFORCE(mlvalue_name_idx_map.MaxIdx() > 0, "MLValue indexes should have been populated."); const onnxruntime::InitializedTensorSet& initialized_tensor_set = graph.GetAllInitializedTensors(); for (const auto& entry : initialized_tensor_set) { const std::string& name = entry.first; int mlvalue_index; - ONNXRUNTIME_RETURN_IF_ERROR(mlvalue_name_idx_map.GetIdx(name, mlvalue_index)); + ORT_RETURN_IF_ERROR(mlvalue_name_idx_map.GetIdx(name, mlvalue_index)); VLOGS(logger, 1) << "About to add weight with name: " << name << " and index: " << mlvalue_index; auto& location = execution_plan.allocation_plan[mlvalue_index].location; MLValue mlvalue; - ONNXRUNTIME_RETURN_IF_ERROR(DeserializeTensorProto(*(entry.second), location, exec_providers, mlvalue, nullptr, 0)); + ORT_RETURN_IF_ERROR(DeserializeTensorProto(*(entry.second), location, exec_providers, mlvalue, nullptr, 0)); save_tensor_func(mlvalue_index, mlvalue); VLOGS(logger, 1) << "Added weight with name : " << name << " with index: " << mlvalue_index; } @@ -409,7 +409,7 @@ common::Status SaveInitializedTensors(const onnxruntime::Graph& graph, const SequentialExecutionPlan& execution_plan, const ExecutionProviders& exec_providers, const MLValueNameIdxMap& mlvalue_name_idx_map, - std::map& weights_buffers, + std::map& weights_buffers, const SaveTensorFunc& save_tensor_func, const logging::Logger& logger) { // if we enable the memory pattern and already have the execution plan @@ -441,8 +441,8 @@ static common::Status CreateOpKernel(const onnxruntime::Node& node, const IExecutionProvider* exec_provider = nullptr; if (exec_provider_name.empty() || (exec_provider = 
execution_providers.Get(exec_provider_name)) == nullptr) { - auto status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Could not create kernel for node: ", node.Name(), - " as there's no execution provider allocated."); + auto status = ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Could not create kernel for node: ", node.Name(), + " as there's no execution provider allocated."); LOGS(logger, ERROR) << status.ErrorMessage(); } @@ -466,7 +466,7 @@ common::Status SaveKernels(const ExecutionProviders& execution_providers, for (auto& node : session_state.GetGraphViewer()->Nodes()) { // construct and save the kernels std::unique_ptr op_kernel; - ONNXRUNTIME_RETURN_IF_ERROR(CreateOpKernel(node, execution_providers, session_state, custom_registry_manager, op_kernel, logger)); + ORT_RETURN_IF_ERROR(CreateOpKernel(node, execution_providers, session_state, custom_registry_manager, op_kernel, logger)); session_state.AddKernel(node.Index(), std::move(op_kernel)); } @@ -490,7 +490,7 @@ common::Status SaveInputOutputNamesToNodeMapping(const onnxruntime::Graph& graph auto& graph_outputs = graph.GetOutputs(); for (auto& node : graph.Nodes()) { - ONNXRUNTIME_RETURN_IF_ERROR( + ORT_RETURN_IF_ERROR( onnxruntime::Node::ForEachWithIndex( node.InputDefs(), [&](const onnxruntime::NodeArg& arg, size_t index) { diff --git a/onnxruntime/core/framework/session_state_initializer.h b/onnxruntime/core/framework/session_state_initializer.h index dd1d6c1dcb955..95fc97d65b3ab 100644 --- a/onnxruntime/core/framework/session_state_initializer.h +++ b/onnxruntime/core/framework/session_state_initializer.h @@ -37,7 +37,7 @@ class SessionStateInitializer { // initialize tensors, and save. save kernels and input/output node mappings // @param enable_memory_pattern common::Status InitializeAndSave(bool enable_memory_pattern, - std::map& weights_buffers); + std::map& weights_buffers); private: onnxruntime::Graph& graph_; diff --git a/onnxruntime/core/framework/tensor.cc b/onnxruntime/core/framework/tensor.cc index 7334303e407df..2d805d942fe0f 100644 --- a/onnxruntime/core/framework/tensor.cc +++ b/onnxruntime/core/framework/tensor.cc @@ -11,18 +11,18 @@ namespace onnxruntime { Tensor::Tensor(MLDataType p_type, const TensorShape& shape, BufferNakedPtr p_data, - const ONNXRuntimeAllocatorInfo& alloc, + const OrtAllocatorInfo& alloc, AllocatorPtr deleter, const int64_t offset) : alloc_info_(alloc) { - ONNXRUNTIME_ENFORCE(p_type != nullptr); + ORT_ENFORCE(p_type != nullptr); Init(p_type, shape, p_data, alloc, std::move(deleter), offset); } void Tensor::Init(MLDataType p_type, const TensorShape& shape, void* p_raw_data, - const ONNXRuntimeAllocatorInfo& alloc, + const OrtAllocatorInfo& alloc, AllocatorPtr deleter, const int64_t offset) { if (shape.Size() < 0) @@ -83,7 +83,7 @@ Tensor::Tensor(const Tensor& src) : shape_(src.shape_), dtype_(src.dtype_), alloc_info_(src.alloc_info_), byte_offset_(src.byte_offset_) { // it may be better to refactor it a little bit to make it a compile error // but right now just keep it simple first. - ONNXRUNTIME_ENFORCE(src.buffer_deleter_ == nullptr, + ORT_ENFORCE(src.buffer_deleter_ == nullptr, "Can't copy tensor with its owned buffer. Please transfer ownership by move."); p_data_ = src.p_data_; @@ -96,7 +96,7 @@ Tensor::~Tensor() { Tensor& Tensor::ShallowCopy(const Tensor& other) { // similar as above - ONNXRUNTIME_ENFORCE(other.buffer_deleter_ == nullptr, + ORT_ENFORCE(other.buffer_deleter_ == nullptr, "Can't copy tensor with its owned buffer. 
Please transfer ownership by move."); if (this != &other) { diff --git a/onnxruntime/core/framework/tensor_shape.cc b/onnxruntime/core/framework/tensor_shape.cc index 8db0005d7a208..e252f64729e24 100644 --- a/onnxruntime/core/framework/tensor_shape.cc +++ b/onnxruntime/core/framework/tensor_shape.cc @@ -37,7 +37,7 @@ int64_t TensorShape::Size() const { int64_t TensorShape::SizeToDimension(size_t dimension) const { const size_t num_dims = size(); - ONNXRUNTIME_ENFORCE(dimension <= num_dims, + ORT_ENFORCE(dimension <= num_dims, "Invalid dimension of ", dimension, " for SizeFromDimension. Tensor has ", num_dims, " dimensions."); @@ -47,7 +47,7 @@ int64_t TensorShape::SizeToDimension(size_t dimension) const { int64_t TensorShape::SizeFromDimension(size_t dimension) const { const size_t num_dims = size(); - ONNXRUNTIME_ENFORCE(dimension <= num_dims, + ORT_ENFORCE(dimension <= num_dims, "Invalid dimension of ", dimension, " for SizeFromDimension. Tensor has ", num_dims, " dimensions."); @@ -56,7 +56,7 @@ int64_t TensorShape::SizeFromDimension(size_t dimension) const { } TensorShape TensorShape::Slice(size_t dimstart, size_t dimend) const { - ONNXRUNTIME_ENFORCE(dimstart <= dimend && dimend <= size(), + ORT_ENFORCE(dimstart <= dimend && dimend <= size(), "Invalid tensor shape slice argument."); return TensorShape(*this, dimstart, dimend); } diff --git a/onnxruntime/core/framework/tensor_type_and_shape.cc b/onnxruntime/core/framework/tensor_type_and_shape.cc index c884d4d78dd67..76f0c88957daa 100644 --- a/onnxruntime/core/framework/tensor_type_and_shape.cc +++ b/onnxruntime/core/framework/tensor_type_and_shape.cc @@ -14,74 +14,74 @@ using onnxruntime::DataTypeImpl; using onnxruntime::MLFloat16; using onnxruntime::Tensor; -struct ONNXRuntimeTensorTypeAndShapeInfo : public onnxruntime::ObjectBase { +struct OrtTensorTypeAndShapeInfo : public onnxruntime::ObjectBase { public: - friend class onnxruntime::ObjectBase; + friend class onnxruntime::ObjectBase; - OnnxRuntimeTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT; + OrtTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT; onnxruntime::TensorShape shape; - static ONNXRuntimeTensorTypeAndShapeInfo* Create() { - return new ONNXRuntimeTensorTypeAndShapeInfo(); + static OrtTensorTypeAndShapeInfo* Create() { + return new OrtTensorTypeAndShapeInfo(); } - ONNXRuntimeTensorTypeAndShapeInfo(const ONNXRuntimeTensorTypeAndShapeInfo& other) = delete; - ONNXRuntimeTensorTypeAndShapeInfo& operator=(const ONNXRuntimeTensorTypeAndShapeInfo& other) = delete; + OrtTensorTypeAndShapeInfo(const OrtTensorTypeAndShapeInfo& other) = delete; + OrtTensorTypeAndShapeInfo& operator=(const OrtTensorTypeAndShapeInfo& other) = delete; private: - ONNXRuntimeTensorTypeAndShapeInfo() = default; - ~ONNXRuntimeTensorTypeAndShapeInfo() { + OrtTensorTypeAndShapeInfo() = default; + ~OrtTensorTypeAndShapeInfo() { assert(ref_count == 0); } }; #define API_IMPL_BEGIN try { -#define API_IMPL_END \ - } \ - catch (std::exception & ex) { \ - return CreateONNXStatus(ONNXRUNTIME_RUNTIME_EXCEPTION, ex.what()); \ +#define API_IMPL_END \ + } \ + catch (std::exception & ex) { \ + return CreateONNXStatus(ORT_RUNTIME_EXCEPTION, ex.what()); \ } -ONNXRUNTIME_API(ONNXRuntimeTensorTypeAndShapeInfo*, ONNXRuntimeCreateTensorTypeAndShapeInfo) { - return ONNXRuntimeTensorTypeAndShapeInfo::Create(); +ORT_API(OrtTensorTypeAndShapeInfo*, OrtCreateTensorTypeAndShapeInfo) { + return OrtTensorTypeAndShapeInfo::Create(); } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeSetTensorElementType, _In_ 
ONNXRuntimeTensorTypeAndShapeInfo* this_ptr, enum OnnxRuntimeTensorElementDataType type) { +ORT_API_STATUS_IMPL(OrtSetTensorElementType, _In_ OrtTensorTypeAndShapeInfo* this_ptr, enum OrtTensorElementDataType type) { API_IMPL_BEGIN this_ptr->type = type; return nullptr; API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeSetDims, _In_ ONNXRuntimeTensorTypeAndShapeInfo* this_ptr, _In_ const int64_t* dim_values, size_t dim_count) { +ORT_API_STATUS_IMPL(OrtSetDims, _In_ OrtTensorTypeAndShapeInfo* this_ptr, _In_ const int64_t* dim_values, size_t dim_count) { API_IMPL_BEGIN this_ptr->shape = onnxruntime::TensorShape(dim_values, dim_count); return nullptr; API_IMPL_END } -ONNXRUNTIME_API(enum OnnxRuntimeTensorElementDataType, ONNXRuntimeGetTensorElementType, _In_ const struct ONNXRuntimeTensorTypeAndShapeInfo* info) { +ORT_API(enum OrtTensorElementDataType, OrtGetTensorElementType, _In_ const struct OrtTensorTypeAndShapeInfo* info) { return info->type; } -ONNXRUNTIME_API(size_t, ONNXRuntimeGetNumOfDimensions, _In_ const struct ONNXRuntimeTensorTypeAndShapeInfo* info) { +ORT_API(size_t, OrtGetNumOfDimensions, _In_ const struct OrtTensorTypeAndShapeInfo* info) { return info->shape.NumDimensions(); } -ONNXRUNTIME_API(void, ONNXRuntimeGetDimensions, _In_ const struct ONNXRuntimeTensorTypeAndShapeInfo* info, _Out_ int64_t* dim_values, size_t dim_values_length) { +ORT_API(void, OrtGetDimensions, _In_ const struct OrtTensorTypeAndShapeInfo* info, _Out_ int64_t* dim_values, size_t dim_values_length) { info->shape.CopyDims(dim_values, dim_values_length); } -ONNXRUNTIME_API(int64_t, ONNXRuntimeGetTensorShapeElementCount, _In_ const ONNXRuntimeTensorTypeAndShapeInfo* this_ptr) { +ORT_API(int64_t, OrtGetTensorShapeElementCount, _In_ const OrtTensorTypeAndShapeInfo* this_ptr) { return this_ptr->shape.Size(); } struct ONNXValue; namespace { -inline OnnxRuntimeTensorElementDataType MLDataTypeToOnnxRuntimeTensorElementDataType( +inline OrtTensorElementDataType MLDataTypeToOnnxRuntimeTensorElementDataType( const onnxruntime::DataTypeImpl* cpp_type) { - OnnxRuntimeTensorElementDataType type; + OrtTensorElementDataType type; if (cpp_type == onnxruntime::DataTypeImpl::GetType()) { type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT; } else if (cpp_type == onnxruntime::DataTypeImpl::GetType()) { @@ -115,21 +115,21 @@ inline OnnxRuntimeTensorElementDataType MLDataTypeToOnnxRuntimeTensorElementData } } // namespace -ONNXStatus* GetTensorShapeAndType(const onnxruntime::TensorShape* shape, const onnxruntime::DataTypeImpl* tensor_data_type, ONNXRuntimeTensorTypeAndShapeInfo** out) { - OnnxRuntimeTensorElementDataType type = MLDataTypeToOnnxRuntimeTensorElementDataType(tensor_data_type); +ONNXStatus* GetTensorShapeAndType(const onnxruntime::TensorShape* shape, const onnxruntime::DataTypeImpl* tensor_data_type, OrtTensorTypeAndShapeInfo** out) { + OrtTensorElementDataType type = MLDataTypeToOnnxRuntimeTensorElementDataType(tensor_data_type); if (ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED == type) { - return CreateONNXStatus(ONNXRUNTIME_FAIL, "Not implemented"); + return CreateONNXStatus(ORT_FAIL, "Not implemented"); } - ONNXRuntimeTensorTypeAndShapeInfo* ret = ONNXRuntimeCreateTensorTypeAndShapeInfo(); - auto status = ONNXRuntimeSetTensorElementType(ret, type); + OrtTensorTypeAndShapeInfo* ret = OrtCreateTensorTypeAndShapeInfo(); + auto status = OrtSetTensorElementType(ret, type); if (status != nullptr) { - ONNXRuntimeReleaseObject(ret); + OrtReleaseObject(ret); return status; } if (shape != nullptr) { - status = 
ONNXRuntimeSetDims(ret, shape->GetDims().data(), shape->GetDims().size()); + status = OrtSetDims(ret, shape->GetDims().data(), shape->GetDims().size()); if (status != nullptr) { - ONNXRuntimeReleaseObject(ret); + OrtReleaseObject(ret); return status; } } @@ -137,8 +137,8 @@ ONNXStatus* GetTensorShapeAndType(const onnxruntime::TensorShape* shape, const o return nullptr; } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetTensorShapeAndType, _In_ const ONNXValue* value, - _Out_ ONNXRuntimeTensorTypeAndShapeInfo** out) { +ORT_API_STATUS_IMPL(OrtGetTensorShapeAndType, _In_ const ONNXValue* value, + _Out_ OrtTensorTypeAndShapeInfo** out) { API_IMPL_BEGIN auto v = reinterpret_cast(value); const onnxruntime::Tensor& tensor = v->Get(); @@ -146,30 +146,30 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetTensorShapeAndType, _In_ const ONNXVal API_IMPL_END } -ONNXRUNTIME_API(enum ONNXRuntimeType, ONNXRuntimeGetValueType, _In_ const ONNXValue* value) { +ORT_API(enum OrtType, OrtGetValueType, _In_ const ONNXValue* value) { try { auto v = reinterpret_cast(value); onnxruntime::MLDataType type = v->Type(); - ONNXRuntimeTypeInfo* out; - ONNXStatus* ptr = ONNXRuntimeTypeInfo::FromDataTypeImpl(type, nullptr, nullptr, &out); + OrtTypeInfo* out; + ONNXStatus* ptr = OrtTypeInfo::FromDataTypeImpl(type, nullptr, nullptr, &out); if (ptr != nullptr) { ReleaseONNXStatus(ptr); - return ONNXRUNTIME_TYPE_UNKNOWN; + return ORT_TYPE_UNKNOWN; } - ONNXRuntimeType ret = out->type; - ONNXRuntimeReleaseObject(out); + OrtType ret = out->type; + OrtReleaseObject(out); return ret; } catch (std::exception&) { - return ONNXRUNTIME_TYPE_UNKNOWN; + return ORT_TYPE_UNKNOWN; } } /** * Get the type information of an ONNXValue * \param value - * \return The returned value should be freed by ONNXRuntimeReleaseObject after use + * \return The returned value should be freed by OrtReleaseObject after use */ -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetTypeInfo, _In_ const ONNXValue* value, struct ONNXRuntimeTypeInfo** out) { +ORT_API_STATUS_IMPL(OrtGetTypeInfo, _In_ const ONNXValue* value, struct OrtTypeInfo** out) { auto v = reinterpret_cast(value); onnxruntime::MLDataType type = v->Type(); if (type == nullptr) { @@ -179,7 +179,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetTypeInfo, _In_ const ONNXValue* value, if (type == DataTypeImpl::GetType()) { const onnxruntime::Tensor& tensor = v->Get(); const onnxruntime::TensorShape& shape = tensor.Shape(); - return ONNXRuntimeTypeInfo::FromDataTypeImpl(type, &shape, tensor.DataType(), out); + return OrtTypeInfo::FromDataTypeImpl(type, &shape, tensor.DataType(), out); } - return ONNXRuntimeTypeInfo::FromDataTypeImpl(type, nullptr, nullptr, out); + return OrtTypeInfo::FromDataTypeImpl(type, nullptr, nullptr, out); } diff --git a/onnxruntime/core/framework/tensorprotoutils.cc b/onnxruntime/core/framework/tensorprotoutils.cc index e425c590383e3..b36c72266a7fa 100644 --- a/onnxruntime/core/framework/tensorprotoutils.cc +++ b/onnxruntime/core/framework/tensorprotoutils.cc @@ -47,7 +47,7 @@ common::Status GetTensorByTypeFromTensorProto(const TensorProto& tensor_proto, int64_t tensor_size = tensor_shape.Size(); //tensor_size could be zero. 
see test_slice_start_out_of_bounds\test_data_set_0\output_0.pb if (tensor_size < 0) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid shape ", tensor_shape); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid shape ", tensor_shape); } size_t size_to_allocate; if (!IAllocator::CalcMemSizeForArrayWithAlignment<256>(static_cast(tensor_size), sizeof(T), &size_to_allocate)) { @@ -55,10 +55,10 @@ common::Status GetTensorByTypeFromTensorProto(const TensorProto& tensor_proto, } if (preallocated && preallocated_size != size_to_allocate) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "The buffer planner is not consistent with tensor buffer size, expected ", size_to_allocate, ", got ", preallocated_size); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "The buffer planner is not consistent with tensor buffer size, expected ", size_to_allocate, ", got ", preallocated_size); //TODO(): size_to_allocate could be zero. We shouldn't pass zero to alloc->Alloc() T* p_data = static_cast(preallocated ? preallocated : alloc->Alloc(size_to_allocate)); - ONNXRUNTIME_RETURN_IF_ERROR(::onnxruntime::utils::TensorUtils::UnpackTensor(tensor_proto, p_data, tensor_size)); + ORT_RETURN_IF_ERROR(::onnxruntime::utils::TensorUtils::UnpackTensor(tensor_proto, p_data, tensor_size)); *p_tensor = std::make_unique(DataTypeImpl::GetType(), tensor_shape, static_cast(p_data), @@ -102,7 +102,7 @@ common::Status GetTensorByTypeFromTensorProto(const TensorProto& te however restricting it to string types only alleviates this concern for other types at least. Hence the template specialization for string. */ - ONNXRUNTIME_RETURN_IF_ERROR(::onnxruntime::utils::TensorUtils::UnpackTensor(tensor_proto, p_data, tensor_size)); + ORT_RETURN_IF_ERROR(::onnxruntime::utils::TensorUtils::UnpackTensor(tensor_proto, p_data, tensor_size)); return common::Status::OK(); } @@ -128,7 +128,7 @@ common::Status GetTensorByTypeFromTensorProto(const TensorProto& tens return Status(ONNXRUNTIME, FAIL, "The buffer planner is not consistent with tensor buffer size"); MLFloat16* p_data = static_cast(preallocated ? 
preallocated : alloc->Alloc(size_to_allocate)); - ONNXRUNTIME_RETURN_IF_ERROR(::onnxruntime::utils::TensorUtils::UnpackTensor(tensor_proto, p_data, tensor_size)); + ORT_RETURN_IF_ERROR(::onnxruntime::utils::TensorUtils::UnpackTensor(tensor_proto, p_data, tensor_size)); *p_tensor = std::make_unique(DataTypeImpl::GetType(), tensor_shape, static_cast(p_data), @@ -141,7 +141,7 @@ common::Status GetTensorByTypeFromTensorProto(const TensorProto& tens Status TensorProtoToMLValue(const ONNX_NAMESPACE::TensorProto& input, AllocatorPtr allocator, void* preallocated, size_t preallocated_size, MLValue& value) { std::unique_ptr p_tensor; - ONNXRUNTIME_RETURN_IF_ERROR(GetTensorFromTensorProto(input, &p_tensor, allocator, preallocated, preallocated_size)); + ORT_RETURN_IF_ERROR(GetTensorFromTensorProto(input, &p_tensor, allocator, preallocated, preallocated_size)); value.Init(p_tensor.release(), DataTypeImpl::GetType(), DataTypeImpl::GetType()->GetDeleteFunc()); diff --git a/onnxruntime/core/framework/tensorutils.cc b/onnxruntime/core/framework/tensorutils.cc index d104a5d6f9e2b..7723e12168b21 100644 --- a/onnxruntime/core/framework/tensorutils.cc +++ b/onnxruntime/core/framework/tensorutils.cc @@ -64,7 +64,7 @@ namespace utils { return Status(common::ONNXRUNTIME, common::FAIL, "size overflow"); \ } \ if (tensor.raw_data().size() != expected_size_in_bytes) \ - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, \ + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \ "UnpackTensor: the pre-allocated size does not match the raw data size, expected ", \ expected_size_in_bytes, ", got ", tensor.raw_data().size()); \ UnpackTensorWithRawData(tensor, p_data); \ diff --git a/onnxruntime/core/framework/transformer_memcpy.cc b/onnxruntime/core/framework/transformer_memcpy.cc index ebff869a0e3da..a4faef010d5c0 100644 --- a/onnxruntime/core/framework/transformer_memcpy.cc +++ b/onnxruntime/core/framework/transformer_memcpy.cc @@ -70,7 +70,7 @@ void TransformerMemcpyImpl::ProcessDefs(onnxruntime::Node& node, const KernelReg kernel_registries.SearchKernelRegistry(node, &kci); const auto* input_mem_types = kci ? &kci->kernel_def->InputMemoryType() : nullptr; const auto* output_mem_types = kci ? &kci->kernel_def->InputMemoryType() : nullptr; - ONNXRUNTIME_ENFORCE(onnxruntime::Node::ForEachWithIndex( + ORT_ENFORCE(onnxruntime::Node::ForEachWithIndex( node.InputDefs(), [this, &input_mem_types](const onnxruntime::NodeArg& arg, size_t index) { if (input_mem_types && MemTypeOnCpuExplicitly(*input_mem_types, index)) @@ -94,7 +94,7 @@ void TransformerMemcpyImpl::ProcessDefs(onnxruntime::Node& node, const KernelReg } else { // TODO: copy between devices? i.e. 
multiple GPUs if (node.GetExecutionProviderType() != onnxruntime::kCpuExecutionProvider && !node.GetExecutionProviderType().empty()) { - ONNXRUNTIME_THROW("Execution type '", node.GetExecutionProviderType(), "' doesn't support memcpy "); + ORT_THROW("Execution type '", node.GetExecutionProviderType(), "' doesn't support memcpy "); } for (const auto* arg : node.InputDefs()) { @@ -159,7 +159,7 @@ void TransformerMemcpyImpl::ProcessInitializers() { const TensorProto* tensor_proto = nullptr; bool found = graph_.GetInitializedTensor(name, tensor_proto); - ONNXRUNTIME_ENFORCE(found, "Failed to get initialized tensor ", name); + ORT_ENFORCE(found, "Failed to get initialized tensor ", name); TensorProto new_tensor_proto = *tensor_proto; *(new_tensor_proto.mutable_name()) = new_def_name; diff --git a/onnxruntime/core/framework/transformer_memcpy.h b/onnxruntime/core/framework/transformer_memcpy.h index 97eacb1091930..c4459d9d7429d 100644 --- a/onnxruntime/core/framework/transformer_memcpy.h +++ b/onnxruntime/core/framework/transformer_memcpy.h @@ -24,7 +24,7 @@ class TransformerMemcpyImpl { void ProcessInitializers(); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TransformerMemcpyImpl); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TransformerMemcpyImpl); // use value-based compare to make sure transformer output order is consistent struct NodeCompare { diff --git a/onnxruntime/core/framework/utils.cc b/onnxruntime/core/framework/utils.cc index 05e6f7bd126e9..4bb3a192f59d5 100644 --- a/onnxruntime/core/framework/utils.cc +++ b/onnxruntime/core/framework/utils.cc @@ -26,7 +26,7 @@ const KernelDef* GetKernelDef(const KernelRegistryManager& kernel_registry, return kernel_def; } -AllocatorPtr GetAllocator(const ExecutionProviders& exec_providers, const ONNXRuntimeAllocatorInfo& allocator_info) { +AllocatorPtr GetAllocator(const ExecutionProviders& exec_providers, const OrtAllocatorInfo& allocator_info) { auto exec_provider = exec_providers.Get(allocator_info); if (exec_provider == nullptr) { return nullptr; @@ -35,7 +35,7 @@ AllocatorPtr GetAllocator(const ExecutionProviders& exec_providers, const ONNXRu return exec_provider->GetAllocator(allocator_info.id, allocator_info.mem_type); } -AllocatorPtr GetAllocator(const SessionState& session_state, const ONNXRuntimeAllocatorInfo& allocator_info) { +AllocatorPtr GetAllocator(const SessionState& session_state, const OrtAllocatorInfo& allocator_info) { return GetAllocator(session_state.GetExecutionProviders(), allocator_info); } diff --git a/onnxruntime/core/framework/utils.h b/onnxruntime/core/framework/utils.h index 62c462943240c..4e4d04ede6273 100644 --- a/onnxruntime/core/framework/utils.h +++ b/onnxruntime/core/framework/utils.h @@ -29,9 +29,9 @@ const KernelDef* GetKernelDef(const onnxruntime::Graph& graph, const KernelRegistryManager& kernel_registry, const onnxruntime::NodeIndex node_id); -AllocatorPtr GetAllocator(const ExecutionProviders& exec_providers, const ONNXRuntimeAllocatorInfo& allocator_info); +AllocatorPtr GetAllocator(const ExecutionProviders& exec_providers, const OrtAllocatorInfo& allocator_info); AllocatorPtr GetAllocator(const SessionState& session_state, - const ONNXRuntimeAllocatorInfo& allocator_info); + const OrtAllocatorInfo& allocator_info); } // namespace utils } // namespace onnxruntime diff --git a/onnxruntime/core/graph/conv_activation_fusion.cc b/onnxruntime/core/graph/conv_activation_fusion.cc index 5b4649c69b219..2d43d8c00130e 100644 --- a/onnxruntime/core/graph/conv_activation_fusion.cc +++ 
b/onnxruntime/core/graph/conv_activation_fusion.cc @@ -79,7 +79,7 @@ Status ConvActivationFusion::Apply(Graph& graph, bool& modified) const { if (!removed_nodes.empty()) { modified = true; - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); } return Status::OK(); } diff --git a/onnxruntime/core/graph/conv_add_fusion.cc b/onnxruntime/core/graph/conv_add_fusion.cc index 17e9352f62c03..a87b467791440 100644 --- a/onnxruntime/core/graph/conv_add_fusion.cc +++ b/onnxruntime/core/graph/conv_add_fusion.cc @@ -132,7 +132,7 @@ Status ConvAddFusion::Apply(onnxruntime::Graph& graph, bool& modified) const { if (!removed_nodes.empty()) { modified = true; - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); } return Status::OK(); diff --git a/onnxruntime/core/graph/conv_bn_fusion.cc b/onnxruntime/core/graph/conv_bn_fusion.cc index aaf15ab6ec8fe..eab056bf271a5 100644 --- a/onnxruntime/core/graph/conv_bn_fusion.cc +++ b/onnxruntime/core/graph/conv_bn_fusion.cc @@ -179,7 +179,7 @@ Status ConvBNFusion::Apply(onnxruntime::Graph& graph, bool& modified) const { if (!removed_nodes.empty()) { modified = true; - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); } return Status::OK(); } diff --git a/onnxruntime/core/graph/conv_mul_fusion.cc b/onnxruntime/core/graph/conv_mul_fusion.cc index 152d689547a20..facbbcc4989b8 100644 --- a/onnxruntime/core/graph/conv_mul_fusion.cc +++ b/onnxruntime/core/graph/conv_mul_fusion.cc @@ -136,7 +136,7 @@ Status ConvMulFusion::Apply(onnxruntime::Graph& graph, bool& modified) const { if (!removed_nodes.empty()) { modified = true; - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); } return Status::OK(); } diff --git a/onnxruntime/core/graph/function.cc b/onnxruntime/core/graph/function.cc index 5a87ef48407a9..6212da6a2e84b 100644 --- a/onnxruntime/core/graph/function.cc +++ b/onnxruntime/core/graph/function.cc @@ -118,7 +118,7 @@ FunctionImpl::FunctionImpl(const onnxruntime::Graph& graph, sub_graph.AddNode(node->Name(), node->OpType(), node->Description(), inputs, outputs, &node->GetAttributes(), node->Domain()); } //TODO: if we reuse the nodes in parent graph, maybe we don't need to resolve it. 
- ONNXRUNTIME_ENFORCE(sub_graph.Resolve().IsOK()); + ORT_ENFORCE(sub_graph.Resolve().IsOK()); } FunctionImpl::FunctionImpl(const onnxruntime::Graph& graph, @@ -207,7 +207,7 @@ FunctionImpl::FunctionImpl(const onnxruntime::Graph& graph, sub_graph.AddNode(node.name() + "_" + std::to_string(node_index), node.op_type(), node.doc_string(), inputs, outputs, &new_attr_map, node.domain()); } auto status = sub_graph.Resolve(); - ONNXRUNTIME_ENFORCE(status.IsOK()); + ORT_ENFORCE(status.IsOK()); } FunctionImpl::~FunctionImpl() = default; diff --git a/onnxruntime/core/graph/graph.cc b/onnxruntime/core/graph/graph.cc index 8265bbb70b0a8..b593e0b5bb656 100644 --- a/onnxruntime/core/graph/graph.cc +++ b/onnxruntime/core/graph/graph.cc @@ -39,7 +39,7 @@ static Status MergeShapeInfo(const std::string& output_name, try { ONNX_NAMESPACE::mergeInShapeInfo(source, target); } catch (const ONNX_NAMESPACE::InferenceError& ex) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Output:", output_name, " ", ex.what()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Output:", output_name, " ", ex.what()); } return Status::OK(); @@ -141,8 +141,8 @@ common::Status NodeArg::UpdateTypeAndShape(const ONNX_NAMESPACE::TypeProto& inpu const auto input_type_case = input_type.value_case(); if (current_type_case != input_type_case) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type mismatch. Current=", - current_type_case, " Input=", input_type_case); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type mismatch. Current=", + current_type_case, " Input=", input_type_case); switch (input_type_case) { case TypeProto::kTensorType: { @@ -151,14 +151,14 @@ common::Status NodeArg::UpdateTypeAndShape(const ONNX_NAMESPACE::TypeProto& inpu const auto& current_tensor_elem_type = current_type.tensor_type().elem_type(); if (input_tensor_elem_type != current_tensor_elem_type) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Tensor element type mismatch. ", - TensorProto_DataType_Name(input_tensor_elem_type), " != ", - TensorProto_DataType_Name(current_tensor_elem_type)); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Tensor element type mismatch. ", + TensorProto_DataType_Name(input_tensor_elem_type), " != ", + TensorProto_DataType_Name(current_tensor_elem_type)); if (input_tensor_type.has_shape()) { auto& current_tensor_type = *current_type.mutable_tensor_type(); if (current_tensor_type.has_shape()) { - ONNXRUNTIME_RETURN_IF_ERROR(MergeShapeInfo(Name(), input_tensor_type, current_tensor_type)); + ORT_RETURN_IF_ERROR(MergeShapeInfo(Name(), input_tensor_type, current_tensor_type)); } else { current_tensor_type = input_tensor_type; } @@ -171,9 +171,9 @@ common::Status NodeArg::UpdateTypeAndShape(const ONNX_NAMESPACE::TypeProto& inpu const auto input_tensor_elem_type = input_tensor_type.elem_type(); const auto current_tensor_elem_type = current_type.sparse_tensor_type().elem_type(); if (input_tensor_elem_type != current_tensor_elem_type) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "SparseTensor element type mismatch. ", - TensorProto_DataType_Name(input_tensor_elem_type), " != ", - TensorProto_DataType_Name(current_tensor_elem_type)); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "SparseTensor element type mismatch. 
", + TensorProto_DataType_Name(input_tensor_elem_type), " != ", + TensorProto_DataType_Name(current_tensor_elem_type)); } if (input_tensor_type.has_shape()) { auto& current_tensor_type = *current_type.mutable_sparse_tensor_type(); @@ -483,9 +483,9 @@ Status Node::UpdateInputArgCount() { definitions_.input_arg_count.cend(), 0); if (total_arg_count < 0 || static_cast(total_arg_count) != definitions_.input_defs.size()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, - "The sum of input arg count is not equal to size of input defs in node (", - name_, ")"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, + "The sum of input arg count is not equal to size of input defs in node (", + name_, ")"); } // op_ is always valid when this is called @@ -615,7 +615,7 @@ Graph::Graph(GraphProto* graph_proto, model_functions_(model_functions), ir_version_(ir_version), parent_graph_{parent_graph} { - ONNXRUNTIME_ENFORCE(graph_proto != nullptr, "graph_proto cannot be null"); + ORT_ENFORCE(graph_proto != nullptr, "graph_proto cannot be null"); ArgNameToTypeMap name_to_type_map; // these are all empty unless we received a graph_proto as input @@ -773,7 +773,7 @@ common::Status Graph::SetOuterScopeNodeArgs(const std::unordered_setMutableSubgraphs()) { auto status = subgraph->SetOuterScopeNodeArgs(node_args_in_scope_for_subgraph); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); } } } @@ -797,7 +797,7 @@ void Graph::AddEdge(NodeIndex src_node_index, NodeIndex dst_node_index, int src_ nullptr == nodes_[src_node_index] || nullptr == nodes_[dst_node_index]) { // Invalid node indexes specified. - ONNXRUNTIME_THROW("Invalid node indexes specified when adding edge."); + ORT_THROW("Invalid node indexes specified when adding edge."); } NodeArg *src_arg = nullptr, *dst_arg = nullptr; @@ -806,7 +806,7 @@ void Graph::AddEdge(NodeIndex src_node_index, NodeIndex dst_node_index, int src_ } if (nullptr == src_arg) { - ONNXRUNTIME_THROW("Invalid source node arg slot specified when adding edge."); + ORT_THROW("Invalid source node arg slot specified when adding edge."); } auto& dst_node_defs = nodes_[dst_node_index]->MutableDefinitions(); @@ -822,13 +822,13 @@ void Graph::AddEdge(NodeIndex src_node_index, NodeIndex dst_node_index, int src_ } } if (nullptr == dst_arg) { - ONNXRUNTIME_THROW("Invalid destination node arg slot specified when adding edge."); + ORT_THROW("Invalid destination node arg slot specified when adding edge."); } if (src_arg != dst_arg) { if (src_arg->Type() != dst_arg->Type()) { // The output type of source node arg does not match the input type of destination node arg. - ONNXRUNTIME_THROW("Argument type mismatch when adding edge."); + ORT_THROW("Argument type mismatch when adding edge."); } else { src_arg->UpdateTypeAndShape(*dst_arg); *dst_arg_pointer = src_arg; @@ -845,7 +845,7 @@ void Graph::RemoveEdge(NodeIndex src_node_index, NodeIndex dst_node_index, int s nullptr == nodes_[src_node_index] || nullptr == nodes_[dst_node_index]) { // Invalid node indexes specified. 
- ONNXRUNTIME_THROW("Invalid node indexes specified when removing edge."); + ORT_THROW("Invalid node indexes specified when removing edge."); } const NodeArg *src_arg = nullptr, *dst_arg = nullptr; @@ -854,7 +854,7 @@ void Graph::RemoveEdge(NodeIndex src_node_index, NodeIndex dst_node_index, int s } if (nullptr == src_arg) { - ONNXRUNTIME_THROW("Invalid source node arg slot specified when removing edge."); + ORT_THROW("Invalid source node arg slot specified when removing edge."); } auto& dst_node_defs = nodes_[dst_node_index]->GetDefinitions(); @@ -867,12 +867,12 @@ void Graph::RemoveEdge(NodeIndex src_node_index, NodeIndex dst_node_index, int s } } if (nullptr == dst_arg) { - ONNXRUNTIME_THROW("Invalid destination node arg slot specified when removing edge."); + ORT_THROW("Invalid destination node arg slot specified when removing edge."); } if (src_arg != dst_arg) { // The edge ends specified by source and destination arg slot are not referring to same node arg. // It means there was no edge between these two slots before. - ONNXRUNTIME_THROW("Argument type mismatch when removing edge."); + ORT_THROW("Argument type mismatch when removing edge."); } nodes_[dst_node_index]->MutableRelationships().input_edges.erase(Node::EdgeEnd(*nodes_[src_node_index], src_arg_slot, dst_arg_slot)); @@ -901,7 +901,7 @@ Status Graph::BuildConnections(std::vector& outer_scope_node_args_c outer_scope_node_args_consumed.push_back(node_arg_name); if (!parent_graph_) { - return ONNXRUNTIME_MAKE_STATUS( + return ORT_MAKE_STATUS( ONNXRUNTIME, INVALID_GRAPH, "At top level graph without matching NodeArg that subgraph consumes. Name=", node_arg_name, @@ -912,7 +912,7 @@ Status Graph::BuildConnections(std::vector& outer_scope_node_args_c // make sure the node arg is found in the parent graph/s if (!node_arg) { - return ONNXRUNTIME_MAKE_STATUS( + return ORT_MAKE_STATUS( ONNXRUNTIME, INVALID_GRAPH, "Failed to find NodeArg in all parent graphs. Name=", node_arg_name, " Graph may not conform to the ONNX spec and contain initializers that are not graph inputs."); @@ -942,7 +942,7 @@ Status Graph::BuildConnections(std::vector& outer_scope_node_args_c } else { // if it's an output nodearg in this graph we need to create a link to the node the output is coming from auto entry = resolve_context_.output_args.find(node_arg_name); - ONNXRUNTIME_ENFORCE(entry != resolve_context_.output_args.end()); + ORT_ENFORCE(entry != resolve_context_.output_args.end()); // Create relationship between this node (node), and the node providing the output (output_node). Node& output_node = *entry->second.first; @@ -1322,8 +1322,8 @@ Status Graph::InferAndVerifySubgraphTypes(const Node& node, Graph& subgraph, auto num_subgraph_inputs = subgraph_inputs.size(); if (num_subgraph_inputs != input_types.size()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Size mismatch validating subgraph inputs. Got ", - input_types.size(), " inputs but subgraph requires ", subgraph_inputs.size()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Size mismatch validating subgraph inputs. 
Got ", + input_types.size(), " inputs but subgraph requires ", subgraph_inputs.size()); } // apply type/shape info to the subgraph's inputs @@ -1334,7 +1334,7 @@ Status Graph::InferAndVerifySubgraphTypes(const Node& node, Graph& subgraph, NodeArg* mutable_nodearg = subgraph.GetNodeArg(subgraph_input.Name()); status = mutable_nodearg->UpdateTypeAndShape(input_type); if (!status.IsOK()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Node:", node.Name(), " ", status.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Node:", node.Name(), " ", status.ErrorMessage()); } } @@ -1355,18 +1355,18 @@ Status Graph::InferAndVerifySubgraphTypes(const Node& node, Graph& subgraph, status = subgraph_nodearg->UpdateTypeAndShape(*implicit_node_arg); if (!status.IsOK()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Node:", node.Name(), " ", status.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Node:", node.Name(), " ", status.ErrorMessage()); } // all values above us should have a type by now due to ONNX requirements. if (subgraph_nodearg->Type() == nullptr) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Subgraph input missing type."); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Subgraph input missing type."); } // now that we have handled the input types, do the type/shape inferencing for the subgraph // to flow the type/shape info through it status = subgraph.PerformTypeAndShapeInferencing(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); auto& subgraph_outputs = subgraph.GetOutputs(); for (const auto* output : subgraph_outputs) { @@ -1540,7 +1540,7 @@ Status Graph::InferAndVerifyTypeMatch(Node& node, const OpSchema& op) { (*merge_target.mutable_shape()) = *output_def->Shape(); auto status = MergeShapeInfo(output_def->Name(), tensor_type, merge_target); if (!status.IsOK()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Node:", node_name, " ", status.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Node:", node_name, " ", status.ErrorMessage()); } output_def->SetShape(merge_target.shape()); @@ -1644,7 +1644,7 @@ Status Graph::VerifyNodeAndOpMatch() { try { checker::check_node(node_proto, ctx, lsc); } catch (const std::exception& ex) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_GRAPH, "Node:", node_name, " ", ex.what()); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_GRAPH, "Node:", node_name, " ", ex.what()); } auto maxInclusiveVersion = DomainToVersionMap().find(domain)->second; @@ -1667,7 +1667,7 @@ Status Graph::VerifyNodeAndOpMatch() { } } - ONNXRUNTIME_RETURN_IF_ERROR(node.UpdateInputArgCount()); + ORT_RETURN_IF_ERROR(node.UpdateInputArgCount()); // currently an Op is required by ValidateVersion, so we use gsl::not_null to validate that. 
// This may change in the future to allow a null Op @@ -1696,7 +1696,7 @@ Status Graph::VerifyNodeAndOpMatch() { } } - NO_CHANGE_ON_SYNC_FLAG(ONNXRUNTIME_RETURN_IF_ERROR(InferAndVerifyTypeMatch(node, *p_op))); + NO_CHANGE_ON_SYNC_FLAG(ORT_RETURN_IF_ERROR(InferAndVerifyTypeMatch(node, *p_op))); // Accumulate output names of the iterated Node for (auto& output_name : node_proto.output()) { @@ -1756,15 +1756,15 @@ Status Graph::InitInputsInitializersOutputs() { } } - ONNXRUNTIME_RETURN_IF_ERROR(SetGraphInputsOutputs()); - ONNXRUNTIME_RETURN_IF_ERROR(VerifyInputAndInitializerNames()); - ONNXRUNTIME_RETURN_IF_ERROR(VerifyNoDuplicateName()); + ORT_RETURN_IF_ERROR(SetGraphInputsOutputs()); + ORT_RETURN_IF_ERROR(VerifyInputAndInitializerNames()); + ORT_RETURN_IF_ERROR(VerifyNoDuplicateName()); return Status::OK(); } Status Graph::PerformTypeAndShapeInferencing() { - ONNXRUNTIME_RETURN_IF_ERROR(TypeCheckInputsAndInitializers()); + ORT_RETURN_IF_ERROR(TypeCheckInputsAndInitializers()); // type/shape inferencing on the nodes is done recursively as we need subgraph outputs // to be applied to Node outputs for the node containing the subgraph. @@ -1778,18 +1778,18 @@ Status Graph::PerformTypeAndShapeInferencing() { // for all nodes in the subgraph. This leads to recursively handling all subgraphs contained in the node. // - once we finish processing the subgraph/s we apply resultant type/shape information to the outputs // of the node that contains the subgraph. - ONNXRUNTIME_RETURN_IF_ERROR(VerifyNodeAndOpMatch()); + ORT_RETURN_IF_ERROR(VerifyNodeAndOpMatch()); return Status::OK(); } Status Graph::ForThisAndAllSubgraphs(const std::vector& subgraphs, std::function func) { auto status = func(*this); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); for (auto& subgraph : subgraphs) { status = func(*subgraph); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); } return status; @@ -1822,26 +1822,26 @@ Status Graph::Resolve(bool no_proto_sync_required) { // init all graph/subgraphs. non-recursive. auto init_func = [](Graph& graph) { return graph.InitInputsInitializersOutputs(); }; - ONNXRUNTIME_RETURN_IF_ERROR(ForThisAndAllSubgraphs(all_subgraphs, init_func)); + ORT_RETURN_IF_ERROR(ForThisAndAllSubgraphs(all_subgraphs, init_func)); // recursively set the outer scope node args. 
- ONNXRUNTIME_RETURN_IF_ERROR(SetOuterScopeNodeArgs(resolve_context_.outer_scope_node_args)); + ORT_RETURN_IF_ERROR(SetOuterScopeNodeArgs(resolve_context_.outer_scope_node_args)); std::vector outer_scope_node_args_consumed; // recursively build connections between nodes in this graph and all subgraphs - ONNXRUNTIME_RETURN_IF_ERROR(BuildConnections(outer_scope_node_args_consumed)); - ONNXRUNTIME_ENFORCE(outer_scope_node_args_consumed.empty(), - "Shouldn't be possible to have NodeArgs that haven't been handled already."); + ORT_RETURN_IF_ERROR(BuildConnections(outer_scope_node_args_consumed)); + ORT_ENFORCE(outer_scope_node_args_consumed.empty(), + "Shouldn't be possible to have NodeArgs that haven't been handled already."); // topological sort of this and any subgraphs is non-recursive auto topo_sort_func = [](Graph& graph) { return graph.PerformTopologicalSortAndCheckIsAcyclic(); }; - ONNXRUNTIME_RETURN_IF_ERROR(ForThisAndAllSubgraphs(all_subgraphs, topo_sort_func)); + ORT_RETURN_IF_ERROR(ForThisAndAllSubgraphs(all_subgraphs, topo_sort_func)); // type/shape validation and inferencing on this and any subgraphs // recurses into subgraphs via the ONNX checker, which descends into the GraphProto in node attributes // which define a subgraph. - ONNXRUNTIME_RETURN_IF_ERROR(PerformTypeAndShapeInferencing()); + ORT_RETURN_IF_ERROR(PerformTypeAndShapeInferencing()); // perform the final steps for this graph and all subgraphs auto finalize_func = [&no_proto_sync_required](Graph& graph) { @@ -1856,7 +1856,7 @@ Status Graph::Resolve(bool no_proto_sync_required) { return Status::OK(); }; - ONNXRUNTIME_RETURN_IF_ERROR(ForThisAndAllSubgraphs(all_subgraphs, finalize_func)); + ORT_RETURN_IF_ERROR(ForThisAndAllSubgraphs(all_subgraphs, finalize_func)); return Status::OK(); } @@ -1894,7 +1894,7 @@ void Graph::AddInitializedTensor(const TensorProto& tensor) { for (auto dim : tensor.dims()) shape->add_dim()->set_dim_value(dim); - ONNXRUNTIME_IGNORE_RETURN_VALUE(GetOrCreateNodeArg(tensor.name(), &t)); + ORT_IGNORE_RETURN_VALUE(GetOrCreateNodeArg(tensor.name(), &t)); } SetGraphProtoSyncNeeded(); @@ -2059,7 +2059,7 @@ bool Graph::RemoveNode(NodeIndex p_index) { auto node = GetNode(p_index); if (nullptr == node /*|| 0 != node->GetRelationships().output_edges.size()*/) { // Node should be removed after all out edges are removed. - // TODO: add the check commented out back. + // TODO: add the check commented out back. 
return false; } @@ -2166,20 +2166,20 @@ void Graph::CleanUnusedInitializers() { const auto& outputs = GetOutputs(); std::for_each(inputs.cbegin(), inputs.cend(), [&used_args](const NodeArg* input) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(used_args.insert(input->Name())); + ORT_IGNORE_RETURN_VALUE(used_args.insert(input->Name())); }); std::for_each(outputs.cbegin(), outputs.cend(), [&used_args](const NodeArg* output) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(used_args.insert(output->Name())); + ORT_IGNORE_RETURN_VALUE(used_args.insert(output->Name())); }); for (const auto& node : Nodes()) { for (const auto* def : node.InputDefs()) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(used_args.insert(def->Name())); + ORT_IGNORE_RETURN_VALUE(used_args.insert(def->Name())); } for (const auto* def : node.ImplicitInputDefs()) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(used_args.insert(def->Name())); + ORT_IGNORE_RETURN_VALUE(used_args.insert(def->Name())); } } @@ -2239,7 +2239,7 @@ Status Graph::SetGraphInputsOutputs() { // add all graph inputs to input_name_to_node_arg auto& name = graph_input.name(); const auto* node_arg = GetNodeArg(name); - ONNXRUNTIME_ENFORCE(node_arg, "Graph ctor should have created NodeArg for initializer."); + ORT_ENFORCE(node_arg, "Graph ctor should have created NodeArg for initializer."); input_name_to_node_arg.insert({name, node_arg}); // only add non-initializer to specified_graph_inputs @@ -2250,7 +2250,7 @@ Status Graph::SetGraphInputsOutputs() { // add non-initializer outputs for (const auto& node : Nodes()) { for (const auto* output_def : node.OutputDefs()) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(specified_graph_outputs.erase(output_def->Name())); + ORT_IGNORE_RETURN_VALUE(specified_graph_outputs.erase(output_def->Name())); output_name_to_node_arg.insert({output_def->Name(), output_def}); } } @@ -2258,7 +2258,7 @@ Status Graph::SetGraphInputsOutputs() { // add any outputs using initializer if (specified_graph_outputs.size() > 0) { for (const auto& name : specified_initializers) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(specified_graph_outputs.erase(name)); + ORT_IGNORE_RETURN_VALUE(specified_graph_outputs.erase(name)); output_name_to_node_arg.insert({name, GetNodeArg(name)}); } } @@ -2309,8 +2309,8 @@ Status Graph::SetGraphInputsOutputs() { for (auto& graph_input : graph_proto_->input()) { auto& name = graph_input.name(); auto node_arg_iter = input_name_to_node_arg.find(name); - ONNXRUNTIME_ENFORCE(node_arg_iter != input_name_to_node_arg.cend(), - "All inputs and initializers should have entries. Missing ", name); + ORT_ENFORCE(node_arg_iter != input_name_to_node_arg.cend(), + "All inputs and initializers should have entries. 
Missing ", name); graph_inputs_including_initializers_.push_back(node_arg_iter->second); @@ -2330,7 +2330,7 @@ Status Graph::SetGraphInputsOutputs() { // add any explicitly ordered inputs for (auto* node_arg : graph_input_order_) { if (!node_arg || !node_arg->Exists()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Invalid entry in explicitly ordered inputs"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Invalid entry in explicitly ordered inputs"); } added_input_names.insert(node_arg->Name()); @@ -2343,7 +2343,7 @@ Status Graph::SetGraphInputsOutputs() { // add any explicitly ordered outputs for (auto* node_arg : graph_output_order_) { if (!node_arg || !node_arg->Exists()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Invalid entry in explicitly ordered outputs"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Invalid entry in explicitly ordered outputs"); } output_name_to_node_arg.insert({node_arg->Name(), node_arg}); ordered_output_names.push_back(node_arg->Name()); @@ -2456,10 +2456,10 @@ IOnnxRuntimeOpSchemaCollectionPtr Graph::GetSchemaRegistry() const { Node& Graph::FuseSubGraph(std::unique_ptr<::onnxruntime::IndexedSubGraph> sub_graph, const std::string& fused_node_name) { - ONNXRUNTIME_ENFORCE(nullptr != sub_graph && nullptr != sub_graph->GetMetaDef()); + ORT_ENFORCE(nullptr != sub_graph && nullptr != sub_graph->GetMetaDef()); auto func_meta_def = sub_graph->GetMetaDef(); - ONNXRUNTIME_ENFORCE(nullptr != func_meta_def); + ORT_ENFORCE(nullptr != func_meta_def); std::vector input_args, output_args; for (auto& arg_name : func_meta_def->inputs) { input_args.push_back(GetNodeArg(arg_name)); @@ -2508,7 +2508,7 @@ Status Graph::InlineFunction(Node& node) { for (const auto& subgraph_node : subgraph.Nodes()) { AddNode(subgraph_node); } - ONNXRUNTIME_RETURN_IF_ERROR(this->Resolve()); + ORT_RETURN_IF_ERROR(this->Resolve()); return Status::OK(); } diff --git a/onnxruntime/core/graph/graph_transformer.cc b/onnxruntime/core/graph/graph_transformer.cc index bcb5fa03bf66f..46294f0ae8a85 100644 --- a/onnxruntime/core/graph/graph_transformer.cc +++ b/onnxruntime/core/graph/graph_transformer.cc @@ -17,7 +17,7 @@ Status RuleBasedGraphTransformer::Register(const std::string& op_type, std::uniq } Status TopDownRuleBasedTransformer::Apply(Graph& graph, bool& modified) const { - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); GraphViewer graph_viewer(graph); auto& order = graph_viewer.GetNodesInTopologicalOrder(); @@ -33,13 +33,13 @@ Status TopDownRuleBasedTransformer::Apply(Graph& graph, bool& modified) const { continue; for (const auto& rule : *rules) { - ONNXRUNTIME_RETURN_IF_ERROR(rule->CheckConditionAndApply(graph, *node, modified)); + ORT_RETURN_IF_ERROR(rule->CheckConditionAndApply(graph, *node, modified)); } } // Resolve the graph at the end of all passes. 
if (modified) { - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); } return Status::OK(); diff --git a/onnxruntime/core/graph/graph_transformer_mgr.h b/onnxruntime/core/graph/graph_transformer_mgr.h index 0ad7026535f5d..dc3e264b69a0e 100644 --- a/onnxruntime/core/graph/graph_transformer_mgr.h +++ b/onnxruntime/core/graph/graph_transformer_mgr.h @@ -26,7 +26,7 @@ class GraphTransformerManager { private: GraphTransformerManager() = default; - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphTransformerManager); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(GraphTransformerManager); std::vector> transformers_; const unsigned steps_; diff --git a/onnxruntime/core/graph/identity_elimination.cc b/onnxruntime/core/graph/identity_elimination.cc index 969d136626359..a2cd4c94c4a9c 100644 --- a/onnxruntime/core/graph/identity_elimination.cc +++ b/onnxruntime/core/graph/identity_elimination.cc @@ -26,7 +26,7 @@ Status EliminateIdentity::Apply(Graph& graph_editor, Node& node, bool& modified) graph_editor.RemoveNode(node.Index()); // TODO: Make sure resolve is not required here. - //ONNXRUNTIME_RETURN_IF_ERROR(graph_editor->Resolve()); + //ORT_RETURN_IF_ERROR(graph_editor->Resolve()); return Status::OK(); } diff --git a/onnxruntime/core/graph/initializer.h b/onnxruntime/core/graph/initializer.h index 67171f84fdfa6..08b7951acda61 100644 --- a/onnxruntime/core/graph/initializer.h +++ b/onnxruntime/core/graph/initializer.h @@ -42,7 +42,7 @@ class Initializer final { break; } default: - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, "data type is not supported"); + ORT_NOT_IMPLEMENTED(__FUNCTION__, "data type is not supported"); break; } } @@ -65,7 +65,7 @@ class Initializer final { switch (data_type_) { case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: { int64_t size = tensor_proto->float_data_size(); - ONNXRUNTIME_ENFORCE(size_ == size, "size is different"); + ORT_ENFORCE(size_ == size, "size is different"); for (int i = 0; i < size_; i++) { float_data_.push_back(tensor_proto->float_data(i)); } @@ -73,14 +73,14 @@ class Initializer final { } case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: { int64_t size = tensor_proto->double_data_size(); - ONNXRUNTIME_ENFORCE(size_ == size, "size is different"); + ORT_ENFORCE(size_ == size, "size is different"); for (int i = 0; i < size_; i++) { double_data_.push_back(tensor_proto->double_data(i)); } break; } default: - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, "data type is not supported"); + ORT_NOT_IMPLEMENTED(__FUNCTION__, "data type is not supported"); break; } } @@ -122,7 +122,7 @@ class Initializer final { break; } default: - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, "data type is not supported"); + ORT_NOT_IMPLEMENTED(__FUNCTION__, "data type is not supported"); break; } } @@ -342,7 +342,7 @@ class Initializer final { num *= dims_[k]; } - int64_t n = size()/num; + int64_t n = size() / num; switch (data_type_) { case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: { float* dst = data(); diff --git a/onnxruntime/core/graph/model.cc b/onnxruntime/core/graph/model.cc index 67b73e68b0bfb..da003fea14107 100644 --- a/onnxruntime/core/graph/model.cc +++ b/onnxruntime/core/graph/model.cc @@ -228,7 +228,7 @@ Status Model::Load(const ModelProto& model_proto, std::shared_ptr& model, return Status(ONNXRUNTIME, INVALID_ARGUMENT, "Failed to load model with error: " + std::string(ex.what())); } - ONNXRUNTIME_RETURN_IF_ERROR(model->MainGraph().Resolve(true)); + ORT_RETURN_IF_ERROR(model->MainGraph().Resolve(true)); return Status::OK(); } @@ -247,7 +247,7 
@@ Status Model::Load(std::unique_ptr p_model_proto, std::shared_ptrMainGraph().Resolve(true)); + ORT_RETURN_IF_ERROR(model->MainGraph().Resolve(true)); return Status::OK(); } @@ -260,11 +260,11 @@ static Status LoadModel(const T& file_path, std::shared_ptr& p_model, con if (status.Category() == common::SYSTEM) { switch (status.Code()) { case ENOENT: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, NO_SUCHFILE, "Load model failed. File doesn't exist"); + return ORT_MAKE_STATUS(ONNXRUNTIME, NO_SUCHFILE, "Load model failed. File doesn't exist"); case EINVAL: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT); default: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "system error number ", status.Code()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "system error number ", status.Code()); } } } @@ -272,12 +272,12 @@ static Status LoadModel(const T& file_path, std::shared_ptr& p_model, con status = Model::Load(fd, p_model, local_registries); } catch (std::exception& ex) { GSL_SUPPRESS(es .84) - ONNXRUNTIME_IGNORE_RETURN_VALUE(Env::Default().FileClose(fd)); + ORT_IGNORE_RETURN_VALUE(Env::Default().FileClose(fd)); return Status(ONNXRUNTIME, FAIL, ex.what()); } if (!status.IsOK()) { GSL_SUPPRESS(es .84) - ONNXRUNTIME_IGNORE_RETURN_VALUE(Env::Default().FileClose(fd)); + ORT_IGNORE_RETURN_VALUE(Env::Default().FileClose(fd)); return status; } return Env::Default().FileClose(fd); @@ -287,17 +287,17 @@ template static Status SaveModel(Model& model, const T& file_path) { int fd; Status status = Env::Default().FileOpenWr(file_path, fd); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); try { status = Model::Save(model, fd); } catch (std::exception& ex) { GSL_SUPPRESS(es .84) - ONNXRUNTIME_IGNORE_RETURN_VALUE(Env::Default().FileClose(fd)); + ORT_IGNORE_RETURN_VALUE(Env::Default().FileClose(fd)); return Status(ONNXRUNTIME, FAIL, ex.what()); } if (!status.IsOK()) { GSL_SUPPRESS(es .84) - ONNXRUNTIME_IGNORE_RETURN_VALUE(Env::Default().FileClose(fd)); + ORT_IGNORE_RETURN_VALUE(Env::Default().FileClose(fd)); return status; } return Env::Default().FileClose(fd); @@ -335,7 +335,7 @@ Status Model::LoadFromBytes(int count, void* p_bytes, /*out*/ std::shared_ptr(std::move(modelProto), local_registries); - ONNXRUNTIME_RETURN_IF_ERROR(p_model->MainGraph().Resolve(true)); + ORT_RETURN_IF_ERROR(p_model->MainGraph().Resolve(true)); return Status::OK(); } @@ -366,7 +366,7 @@ Status Model::Load(int fd, std::shared_ptr& p_model, const IOnnxRuntimeOp p_model = std::make_shared(std::move(model_proto), local_registries); - ONNXRUNTIME_RETURN_IF_ERROR(p_model->MainGraph().Resolve(true)); + ORT_RETURN_IF_ERROR(p_model->MainGraph().Resolve(true)); return Status::OK(); } @@ -376,7 +376,7 @@ Status Model::Save(Model& model, int p_fd) { return Status(ONNXRUNTIME, INVALID_ARGUMENT, " is less than 0."); } - ONNXRUNTIME_RETURN_IF_ERROR(model.MainGraph().Resolve()); + ORT_RETURN_IF_ERROR(model.MainGraph().Resolve()); auto model_proto = model.ToProto(); const bool result = model_proto.SerializeToFileDescriptor(p_fd); diff --git a/onnxruntime/core/graph/record.h b/onnxruntime/core/graph/record.h index 27e9e142d8536..da0402e97b48e 100644 --- a/onnxruntime/core/graph/record.h +++ b/onnxruntime/core/graph/record.h @@ -21,7 +21,7 @@ class Record { Record() = default; Record(const std::vector& names, const Values& values) { - ONNXRUNTIME_ENFORCE(std::tuple_size::value == names.size(), + ORT_ENFORCE(std::tuple_size::value == names.size(), "Parameter 
sizes do not match. %d != %d", std::tuple_size::value, names.size()); names_ = names; values_ = values; diff --git a/onnxruntime/core/graph/schema_registry.cc b/onnxruntime/core/graph/schema_registry.cc index 608083d1e96ae..1f571bbcb440a 100644 --- a/onnxruntime/core/graph/schema_registry.cc +++ b/onnxruntime/core/graph/schema_registry.cc @@ -39,9 +39,9 @@ common::Status OnnxRuntimeOpSchemaRegistry::RegisterOpSet( const std::string& domain, int baseline_opset_version, int opset_version) { - ONNXRUNTIME_RETURN_IF_ERROR(SetBaselineAndOpsetVersionForDomain(domain, baseline_opset_version, opset_version)); + ORT_RETURN_IF_ERROR(SetBaselineAndOpsetVersionForDomain(domain, baseline_opset_version, opset_version)); for (auto& schema : schemas) - ONNXRUNTIME_RETURN_IF_ERROR(RegisterOpSchema(std::move(schema))); + ORT_RETURN_IF_ERROR(RegisterOpSchema(std::move(schema))); return common::Status::OK(); } diff --git a/onnxruntime/core/graph/unsqueeze_elimination.cc b/onnxruntime/core/graph/unsqueeze_elimination.cc index ab3ee845ec1ea..1d5cf267dd7cf 100644 --- a/onnxruntime/core/graph/unsqueeze_elimination.cc +++ b/onnxruntime/core/graph/unsqueeze_elimination.cc @@ -92,7 +92,7 @@ Status UnsqueezeElimination::Apply(onnxruntime::Graph& graph, bool& modified) co if (!removed_nodes.empty()) { modified = true; - ONNXRUNTIME_RETURN_IF_ERROR(graph.Resolve()); + ORT_RETURN_IF_ERROR(graph.Resolve()); } return Status::OK(); } diff --git a/onnxruntime/core/platform/env.h b/onnxruntime/core/platform/env.h index f4577bc7362df..208b887a0b518 100644 --- a/onnxruntime/core/platform/env.h +++ b/onnxruntime/core/platform/env.h @@ -145,7 +145,7 @@ class Env { Env(); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Env); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Env); EnvTime* env_time_ = EnvTime::Default(); }; @@ -158,7 +158,7 @@ class Thread { virtual ~Thread(); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Thread); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Thread); }; /// \brief Options to configure a Thread. 
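For orientation, ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE (formerly ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE), which this rename touches in Env, Thread, and the sink classes below, conventionally expands to deleting the copy and move members of the enclosing class. The following is a minimal illustrative sketch of that pattern, not copied from the ORT common headers; the helper macro names and exact decomposition are assumptions.

// Sketch only: delete copy/move members for a class.
#define ORT_DISALLOW_COPY(TypeName) TypeName(const TypeName&) = delete
#define ORT_DISALLOW_ASSIGNMENT(TypeName) TypeName& operator=(const TypeName&) = delete
#define ORT_DISALLOW_MOVE(TypeName)    \
  TypeName(TypeName&&) = delete;       \
  TypeName& operator=(TypeName&&) = delete
#define ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TypeName) \
  ORT_DISALLOW_COPY(TypeName);                          \
  ORT_DISALLOW_ASSIGNMENT(TypeName);                    \
  ORT_DISALLOW_MOVE(TypeName)

// Usage, mirroring the classes touched in the hunks above:
class Env {
 public:
  Env() = default;
 private:
  ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Env);
};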
diff --git a/onnxruntime/core/platform/windows/env.cc b/onnxruntime/core/platform/windows/env.cc index 2a157af8de0ad..5376dfdcbdeda 100644 --- a/onnxruntime/core/platform/windows/env.cc +++ b/onnxruntime/core/platform/windows/env.cc @@ -58,7 +58,7 @@ class WindowsEnv : public Env { SYSTEM_INFO sysInfo; GetSystemInfo(&sysInfo); if (sysInfo.dwNumberOfProcessors <= 0) { - ONNXRUNTIME_THROW("Fatal error: 0 count processors from GetSystemInfo"); + ORT_THROW("Fatal error: 0 count processors from GetSystemInfo"); } // This is the number of logical processors in the current group return sysInfo.dwNumberOfProcessors; @@ -70,7 +70,7 @@ class WindowsEnv : public Env { ++processorCoreCount; } } - if (!processorCoreCount) ONNXRUNTIME_THROW("Fatal error: 0 count processors from GetLogicalProcessorInformation"); + if (!processorCoreCount) ORT_THROW("Fatal error: 0 count processors from GetLogicalProcessorInformation"); return processorCoreCount; } @@ -135,27 +135,27 @@ class WindowsEnv : public Env { } virtual Status LoadDynamicLibrary(const std::string& library_filename, void** handle) const override { - ONNXRUNTIME_UNUSED_PARAMETER(library_filename); - ONNXRUNTIME_UNUSED_PARAMETER(handle); - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_UNUSED_PARAMETER(library_filename); + ORT_UNUSED_PARAMETER(handle); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); } virtual common::Status UnloadDynamicLibrary(void* handle) const override { - ONNXRUNTIME_UNUSED_PARAMETER(handle); - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_UNUSED_PARAMETER(handle); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); } virtual Status GetSymbolFromLibrary(void* handle, const std::string& symbol_name, void** symbol) const override { - ONNXRUNTIME_UNUSED_PARAMETER(handle); - ONNXRUNTIME_UNUSED_PARAMETER(symbol_name); - ONNXRUNTIME_UNUSED_PARAMETER(symbol); - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_UNUSED_PARAMETER(handle); + ORT_UNUSED_PARAMETER(symbol_name); + ORT_UNUSED_PARAMETER(symbol); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); } virtual std::string FormatLibraryFileName(const std::string& name, const std::string& version) const override { - ONNXRUNTIME_UNUSED_PARAMETER(name); - ONNXRUNTIME_UNUSED_PARAMETER(version); - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_UNUSED_PARAMETER(name); + ORT_UNUSED_PARAMETER(version); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); } private: diff --git a/onnxruntime/core/platform/windows/logging/etw_sink.cc b/onnxruntime/core/platform/windows/logging/etw_sink.cc index b5831f795711b..165420e9aafa7 100644 --- a/onnxruntime/core/platform/windows/logging/etw_sink.cc +++ b/onnxruntime/core/platform/windows/logging/etw_sink.cc @@ -60,7 +60,7 @@ class EtwRegistrationManager { } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(EtwRegistrationManager); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(EtwRegistrationManager); EtwRegistrationManager(const HRESULT status) noexcept : etw_status_{status} {} const HRESULT etw_status_; diff --git a/onnxruntime/core/platform/windows/logging/etw_sink.h b/onnxruntime/core/platform/windows/logging/etw_sink.h index b9c5cd54c420a..1e4f49a619302 100644 --- a/onnxruntime/core/platform/windows/logging/etw_sink.h +++ b/onnxruntime/core/platform/windows/logging/etw_sink.h @@ -33,7 +33,7 @@ class EtwSink : public ISink { constexpr static const char* kEventName = "ONNXRuntimeLogEvent"; private: - 
ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(EtwSink); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(EtwSink); void SendImpl(const Timestamp& timestamp, const std::string& logger_id, const Capture& message) override; diff --git a/onnxruntime/core/platform/windows/stacktrace.cc b/onnxruntime/core/platform/windows/stacktrace.cc index 05cadad70d9ea..08cf3463a51ea 100644 --- a/onnxruntime/core/platform/windows/stacktrace.cc +++ b/onnxruntime/core/platform/windows/stacktrace.cc @@ -83,7 +83,7 @@ class SymbolHelper { } private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(SymbolHelper); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(SymbolHelper); HANDLE process_ = GetCurrentProcess(); bool cleanup_ = false; diff --git a/onnxruntime/core/providers/common.h b/onnxruntime/core/providers/common.h index b177c201f9359..d3eefa382924d 100644 --- a/onnxruntime/core/providers/common.h +++ b/onnxruntime/core/providers/common.h @@ -14,7 +14,7 @@ Handle a potentially negative axis. Enforces negative axis is valid. @returns Positive axis. */ inline int64_t HandleNegativeAxis(int64_t axis, int64_t tensor_rank) { - ONNXRUNTIME_ENFORCE(axis >= -tensor_rank && axis <= tensor_rank - 1, "axis ", axis, + ORT_ENFORCE(axis >= -tensor_rank && axis <= tensor_rank - 1, "axis ", axis, " is not in valid range [-", tensor_rank, ",", tensor_rank - 1, "]"); // Handle negative axis return axis = axis < 0 ? axis + tensor_rank : axis; diff --git a/onnxruntime/core/providers/cpu/controlflow/if.cc b/onnxruntime/core/providers/cpu/controlflow/if.cc index df87b830c8416..febad17178f41 100644 --- a/onnxruntime/core/providers/cpu/controlflow/if.cc +++ b/onnxruntime/core/providers/cpu/controlflow/if.cc @@ -93,12 +93,12 @@ Status If::Compute(OpKernelContext* ctx) const { auto attribute = condition ? "then_branch" : "else_branch"; auto* session_state = ctx_internal->SubgraphSessionState(attribute); - ONNXRUNTIME_ENFORCE(session_state, "Subgraph SessionState was not found for '", attribute, "' attribute."); + ORT_ENFORCE(session_state, "Subgraph SessionState was not found for '", attribute, "' attribute."); IfImpl impl{*ctx_internal, *session_state}; auto status = impl.Initialize(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); status = impl.Execute(); @@ -119,7 +119,7 @@ Status IfImpl::Initialize() { auto num_subgraph_outputs = graph_outputs.size(); if (num_subgraph_outputs != num_outputs_) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "'If' node has ", num_outputs_, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "'If' node has ", num_outputs_, " outputs which doesn't match the subgraph's ", num_subgraph_outputs, " outputs."); } @@ -132,7 +132,7 @@ Status IfImpl::Initialize() { } auto status = AllocateOutputTensors(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); return Status::OK(); } @@ -144,7 +144,7 @@ Status IfImpl::AllocateOutputTensors() { for (auto& graph_output : subgraph_.GetOutputs()) { auto* graph_output_shape = graph_output->Shape(); if (!graph_output_shape) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Subgraph must have the shape set for all outputs but ", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Subgraph must have the shape set for all outputs but ", graph_output->Name(), " did not."); } @@ -160,7 +160,7 @@ Status IfImpl::AllocateOutputTensors() { auto* tensor = context_.Output(index, output_shape); if (!tensor) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Failed to create output tensor for ", graph_output->Name()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, 
"Failed to create output tensor for ", graph_output->Name()); outputs_.push_back({AllocationType::IfOutput, *context_.GetOutputMLValue(index)}); } @@ -181,7 +181,7 @@ Status IfImpl::Execute() { // pass in implicit inputs as feeds. for (auto& entry : implicit_inputs_) { - ONNXRUNTIME_ENFORCE(entry.second, "All implicit inputs should have MLValue instances by now. ", + ORT_ENFORCE(entry.second, "All implicit inputs should have MLValue instances by now. ", entry.first, " did not."); // prune to values that are in this subgraph as the implicit inputs cover both 'then' and 'else' subgraphs. @@ -202,7 +202,7 @@ Status IfImpl::Execute() { SequentialExecutor executor{context_.GetTerminateFlag()}; status = executor.Execute(session_state_, feeds, subgraph_output_names_, fetches, context_.Logger()); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); for (int i = 0; i < num_outputs_; ++i) { // TODO: Task 1913: Improve handling of If outputs to avoid copy when the shape is not known diff --git a/onnxruntime/core/providers/cpu/controlflow/if.h b/onnxruntime/core/providers/cpu/controlflow/if.h index e227dd6cc2b0d..6675739b7dc1c 100644 --- a/onnxruntime/core/providers/cpu/controlflow/if.h +++ b/onnxruntime/core/providers/cpu/controlflow/if.h @@ -19,9 +19,9 @@ class If final : public OpKernel { // and a SessionState instance for executing the subgraph is created by InferenceSession. // This is available via Info().GetSubgraphSessionState("attribute_name") when Compute is called. ONNX_NAMESPACE::GraphProto proto; - ONNXRUNTIME_ENFORCE(info.GetAttr("then_branch", &proto).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("else_branch", &proto).IsOK()); - ONNXRUNTIME_IGNORE_RETURN_VALUE(proto); + ORT_ENFORCE(info.GetAttr("then_branch", &proto).IsOK()); + ORT_ENFORCE(info.GetAttr("else_branch", &proto).IsOK()); + ORT_IGNORE_RETURN_VALUE(proto); } Status Compute(OpKernelContext* ctx) const override; diff --git a/onnxruntime/core/providers/cpu/controlflow/loop.cc b/onnxruntime/core/providers/cpu/controlflow/loop.cc index 1bd429923403f..454100421dc0d 100644 --- a/onnxruntime/core/providers/cpu/controlflow/loop.cc +++ b/onnxruntime/core/providers/cpu/controlflow/loop.cc @@ -132,12 +132,12 @@ class LoopImpl { Status Loop::Compute(OpKernelContext* ctx) const { auto ctx_internal = static_cast(ctx); auto* session_state = ctx_internal->SubgraphSessionState("body"); - ONNXRUNTIME_ENFORCE(session_state, "Subgraph SessionState was not found for 'body' attribute."); + ORT_ENFORCE(session_state, "Subgraph SessionState was not found for 'body' attribute."); LoopImpl loop_impl{*ctx_internal, *session_state}; auto status = loop_impl.Initialize(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); status = loop_impl.Execute(); @@ -186,7 +186,7 @@ Status LoopImpl::Initialize() { // and that value is in num_subgraph_inputs_. // validate that the subgraph has that many inputs. if (num_subgraph_inputs_ != subgraph_inputs.size()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Graph in 'body' attribute of Loop should have ", num_subgraph_inputs_, " inputs. Found:", subgraph_.GetInputs().size()); } @@ -196,14 +196,14 @@ Status LoopImpl::Initialize() { // check num outputs are correct. 
the 'cond' output from the subgraph is not a Loop output, so diff is 1 if (num_subgraph_outputs - 1 != num_outputs_) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "'Loop' node has ", num_outputs_, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "'Loop' node has ", num_outputs_, " outputs so the subgraph requires ", num_outputs_ + 1, " but has ", num_subgraph_outputs); } AllocatorPtr allocator; status = context_.GetTempSpaceAllocator(&allocator); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); condition_mlvalue_ = MakeScalarMLValue(allocator, condition_); iter_num_mlvalue_ = MakeScalarMLValue(allocator, 0); @@ -241,7 +241,7 @@ NameMLValMap LoopImpl::CreateInitialFeeds() { // pass in implicit inputs as feeds. for (auto& entry : implicit_inputs_) { - ONNXRUNTIME_ENFORCE(entry.second, "All implicit inputs should have MLValue instances by now. ", + ORT_ENFORCE(entry.second, "All implicit inputs should have MLValue instances by now. ", entry.first, " did not."); feeds[entry.first] = *entry.second; } @@ -289,7 +289,7 @@ Status LoopImpl::ConcatenateLoopOutput(std::vector& per_iteration_outpu // sanity check if (bytes_per_iteration != iteration_data.Size()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Inconsistent shape in loop output for output ", output_index, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Inconsistent shape in loop output for output ", output_index, " Expected:", per_iteration_shape, " Got:", iteration_data.Shape()); } @@ -318,7 +318,7 @@ Status LoopImpl::Execute() { SequentialExecutor executor{context_.GetTerminateFlag()}; status = executor.Execute(session_state_, feeds, subgraph_output_names_, fetches, context_.Logger()); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); condition_mlvalue_ = fetches[0]; @@ -346,7 +346,7 @@ Status LoopImpl::Execute() { auto& per_iteration_outputs = loop_output_tensors_[i - num_loop_carried_vars_]; per_iteration_outputs.push_back(fetches[i + 1]); // skip cond - ONNXRUNTIME_RETURN_IF_ERROR(ConcatenateLoopOutput(per_iteration_outputs, i)); + ORT_RETURN_IF_ERROR(ConcatenateLoopOutput(per_iteration_outputs, i)); } } else { // no iterations. @@ -358,7 +358,7 @@ Status LoopImpl::Execute() { // create empty outputs for loop outputs TensorShape empty; for (int i = num_loop_carried_vars_; i < num_outputs_; ++i) { - ONNXRUNTIME_IGNORE_RETURN_VALUE(context_.Output(i, empty)); + ORT_IGNORE_RETURN_VALUE(context_.Output(i, empty)); } } return status; diff --git a/onnxruntime/core/providers/cpu/controlflow/loop.h b/onnxruntime/core/providers/cpu/controlflow/loop.h index d21e8d3d3d8b2..1d0ea015f7825 100644 --- a/onnxruntime/core/providers/cpu/controlflow/loop.h +++ b/onnxruntime/core/providers/cpu/controlflow/loop.h @@ -17,8 +17,8 @@ class Loop final : public OpKernel { // and a SessionState instance for executing the subgraph is created by InferenceSession. // This is available via Info().GetSubgraphSessionState("attribute_name") when Compute is called. 
ONNX_NAMESPACE::GraphProto proto; - ONNXRUNTIME_ENFORCE(info.GetAttr("body", &proto).IsOK()); - ONNXRUNTIME_IGNORE_RETURN_VALUE(proto); + ORT_ENFORCE(info.GetAttr("body", &proto).IsOK()); + ORT_IGNORE_RETURN_VALUE(proto); } Status Compute(OpKernelContext* ctx) const override; diff --git a/onnxruntime/core/providers/cpu/controlflow/scan.cc b/onnxruntime/core/providers/cpu/controlflow/scan.cc index b4ea993954c06..0af2c7a2e67d4 100644 --- a/onnxruntime/core/providers/cpu/controlflow/scan.cc +++ b/onnxruntime/core/providers/cpu/controlflow/scan.cc @@ -242,7 +242,7 @@ class ScanImpl { Status Scan::Compute(OpKernelContext* ctx) const { auto ctx_internal = static_cast(ctx); auto* session_state = ctx_internal->SubgraphSessionState("body"); - ONNXRUNTIME_ENFORCE(session_state, "Subgraph SessionState was not found for 'body' attribute."); + ORT_ENFORCE(session_state, "Subgraph SessionState was not found for 'body' attribute."); // TODO: // Consider how usage of ExecutionFrame and SequentialExecutor can be optimized @@ -251,7 +251,7 @@ Status Scan::Compute(OpKernelContext* ctx) const { ScanImpl scan_impl{*ctx_internal, *session_state, num_scan_inputs_, directions_}; auto status = scan_impl.Initialize(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); status = scan_impl.Execute(); @@ -314,7 +314,7 @@ MLValue& LoopStateVariable::Output() { } void LoopStateVariable::Next() { - ONNXRUNTIME_ENFORCE(iteration_num_ < sequence_len_, "Misuse of LoopStateVariable. Attempt to move beyond end of sequence"); + ORT_ENFORCE(iteration_num_ < sequence_len_, "Misuse of LoopStateVariable. Attempt to move beyond end of sequence"); ++iteration_num_; } @@ -328,7 +328,7 @@ static Status MakeShapeConcrete(const TensorShape& per_iteration_shape, TensorSh final_shape[i + final_shape_offset] = per_iteration_shape[i]; } else { if (existing_value != per_iteration_shape[i]) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Mismatch between expected shape and shape from first output", final_shape, " is not compatible with ", per_iteration_shape); } @@ -361,14 +361,14 @@ Status OutputIterator::Initialize() { // copy the shape from the input initial value which will have a concrete shape. 
auto* input = context_.Input(output_index_ + 1); // +1 to skip the sequence_len input status = MakeShapeConcrete(input->Shape(), final_shape_); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); is_concrete_shape_ = true; } if (is_concrete_shape_) { status = AllocateFinalBuffer(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); } else { // use first_output_ } @@ -382,7 +382,7 @@ Status OutputIterator::AllocateFinalBuffer() { auto* tensor = context_.Output(output_index_, final_shape_); if (!tensor) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Failed to create output tensor for output #", output_index_); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Failed to create output tensor for output #", output_index_); // get the output tensor we just created as an MLValue final_output_mlvalue_ = context_.GetOutputMLValue(output_index_); @@ -404,7 +404,7 @@ Status OutputIterator::AllocateFinalBuffer() { } Status OutputIterator::MakeConcrete() { - ONNXRUNTIME_ENFORCE(first_output_.IsAllocated(), "First usage of OutputIterator did not result in any output."); + ORT_ENFORCE(first_output_.IsAllocated(), "First usage of OutputIterator did not result in any output."); Status status = Status::OK(); auto& tensor = first_output_.Get(); @@ -412,11 +412,11 @@ Status OutputIterator::MakeConcrete() { // update the final shape status = MakeShapeConcrete(tensor_shape, final_shape_); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); is_concrete_shape_ = true; status = AllocateFinalBuffer(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); // copy first output to final buffer auto input_span = gsl::make_span(static_cast(tensor.DataRaw()), tensor.Size()); @@ -433,7 +433,7 @@ Status OutputIterator::MakeConcrete() { } MLValue& OutputIterator::operator*() { - ONNXRUNTIME_ENFORCE(cur_iteration_ < num_iterations_); + ORT_ENFORCE(cur_iteration_ < num_iterations_); if (is_concrete_shape_) return **cur_slicer_iterator_; @@ -446,7 +446,7 @@ OutputIterator& OutputIterator::operator++() { if (!is_concrete_shape_) { // we should have an output now, so convert to using the overall output buffer and slicers auto status = MakeConcrete(); - ONNXRUNTIME_ENFORCE(status.IsOK(), status.ErrorMessage()); + ORT_ENFORCE(status.IsOK(), status.ErrorMessage()); } ++cur_iteration_; @@ -482,7 +482,7 @@ ScanImpl::ScanImpl(OpKernelContextInternal& context, Status ScanImpl::Initialize() { auto status = ValidateInput(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); auto& subgraph_outputs = subgraph_.GetOutputs(); subgraph_output_names_.reserve(subgraph_outputs.size()); @@ -494,7 +494,7 @@ Status ScanImpl::Initialize() { } status = AllocateOutputTensors(); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); return Status::OK(); } @@ -526,7 +526,7 @@ Status ScanImpl::ValidateSubgraphInput(int start_input, int end_input, bool is_l const auto& input_shape = input_tensor.Shape(); if (input_shape.NumDimensions() < min_dims_required) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Invalid scan input:", graph_inputs[i]->Name(), + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Invalid scan input:", graph_inputs[i]->Name(), " Expected ", min_dims_required, " dimensions or more but input had shape of ", input_shape); @@ -536,7 +536,7 @@ Status ScanImpl::ValidateSubgraphInput(int start_input, int end_input, bool is_l batch_size_ = this_batch_size; else { if (batch_size_ != this_batch_size) { - return 
ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Scan inputs have inconsistent batch size. Previous value was ", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Scan inputs have inconsistent batch size. Previous value was ", batch_size_, " but ", graph_inputs[i]->Name(), " has batch size of ", this_batch_size); } @@ -549,7 +549,7 @@ Status ScanImpl::ValidateSubgraphInput(int start_input, int end_input, bool is_l max_sequence_len_ = this_seq_len; } else { if (max_sequence_len_ != this_seq_len) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Scan inputs have inconsistent sequence lengths. Previous value was ", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Scan inputs have inconsistent sequence lengths. Previous value was ", max_sequence_len_, " but ", graph_inputs[i]->Name(), " has length of ", this_seq_len); } @@ -565,23 +565,23 @@ Status ScanImpl::ValidateInput() { auto num_graph_inputs = graph_inputs.size(); if (num_graph_inputs != num_variadic_inputs_) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "The subgraph in 'body' expects ", num_graph_inputs, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "The subgraph in 'body' expects ", num_graph_inputs, " inputs but Scan was only given ", num_variadic_inputs_); } // process any loop state variables, which will set the batch size auto status = ValidateSubgraphInput(0, num_loop_state_variables_, true, graph_inputs); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); // process the scan inputs. sets/validates batch size and sequence length status = ValidateSubgraphInput(num_loop_state_variables_, num_variadic_inputs_, false, graph_inputs); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); if (sequence_lens_tensor_ != nullptr) { auto num_entries = sequence_lens_tensor_->Shape().Size(); if (num_entries != batch_size_) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "sequence_lens length of ", num_entries, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "sequence_lens length of ", num_entries, " did not match batch size of ", batch_size_); } @@ -590,7 +590,7 @@ Status ScanImpl::ValidateInput() { if (std::all_of(sequence_lens_.cbegin(), sequence_lens_.cend(), [this](int64_t value) { return value > 0 && value <= max_sequence_len_; }) == false) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid entries in sequence_lens. 
Max sequence length was ", max_sequence_len_); } @@ -608,7 +608,7 @@ Status ScanImpl::AllocateOutput(int index, bool is_loop_state_var) { auto* graph_output_shape = graph_output->Shape(); if (!graph_output_shape) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Subgraph must have the shape set for all outputs but ", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Subgraph must have the shape set for all outputs but ", graph_output->Name(), " did not."); } @@ -639,18 +639,18 @@ Status ScanImpl::AllocateOutputTensors() { auto& graph_outputs = subgraph_.GetOutputs(); if (graph_outputs.size() != num_variadic_outputs_) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Subgraph in 'body' produces ", graph_outputs.size(), + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Subgraph in 'body' produces ", graph_outputs.size(), " outputs but Scan expects ", num_variadic_outputs_); } for (int i = 0; i < num_loop_state_variables_; ++i) { status = AllocateOutput(i, true); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); } for (int i = num_loop_state_variables_, end = num_variadic_outputs_; i < end; ++i) { status = AllocateOutput(i, false); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); } return Status::OK(); @@ -672,7 +672,7 @@ Status ScanImpl::CreateLoopStateVariables(std::vector::Create(mlvalue).begin()); } @@ -682,7 +682,7 @@ Status ScanImpl::CreateLoopStateVariables(std::vector> batch_loop_state_variables; status = CreateLoopStateVariables(batch_loop_state_variables); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); for (int64_t b = 0; b < batch_size_; ++b) { // Setup input MLValue streams @@ -739,7 +739,7 @@ Status ScanImpl::Execute() { scan_input_stream_iterators, sequence_lens_[b]); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); } return status; @@ -758,7 +758,7 @@ Status ScanImpl::IterateSequence(std::vector& loop_state_vari // pass in implicit inputs as feeds. for (auto& entry : implicit_inputs_) { - ONNXRUNTIME_ENFORCE(entry.second, "All implicit inputs should have MLValue instances by now. ", + ORT_ENFORCE(entry.second, "All implicit inputs should have MLValue instances by now. ", entry.first, " did not."); feeds[entry.first] = *entry.second; } @@ -812,7 +812,7 @@ Status ScanImpl::IterateSequence(std::vector& loop_state_vari // For now just making it work. Optimization and refinement will follow. SequentialExecutor executor{context_.GetTerminateFlag()}; status = executor.Execute(session_state_, feeds, subgraph_output_names_, fetches, context_.Logger()); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); // cycle the LoopStateVariable input/output in preparation for the next iteration std::for_each(loop_state_variables.begin(), loop_state_variables.end(), [](LoopStateVariable& v) { v.Next(); }); diff --git a/onnxruntime/core/providers/cpu/controlflow/scan.h b/onnxruntime/core/providers/cpu/controlflow/scan.h index c7263c200b8dc..4fc6bd6e67e13 100644 --- a/onnxruntime/core/providers/cpu/controlflow/scan.h +++ b/onnxruntime/core/providers/cpu/controlflow/scan.h @@ -17,16 +17,16 @@ class Scan final : public OpKernel { // and a SessionState instance for executing the subgraph is created by InferenceSession. // This is available via Info().GetSubgraphSessionState("attribute_name") when Compute is called. 
ONNX_NAMESPACE::GraphProto proto; - ONNXRUNTIME_ENFORCE(info.GetAttr("body", &proto).IsOK()); + ORT_ENFORCE(info.GetAttr("body", &proto).IsOK()); (void)proto; - ONNXRUNTIME_ENFORCE(info.GetAttr("num_scan_inputs", &num_scan_inputs_).IsOK()); + ORT_ENFORCE(info.GetAttr("num_scan_inputs", &num_scan_inputs_).IsOK()); if (info.GetAttrs("directions", directions_).IsOK()) { - ONNXRUNTIME_ENFORCE(gsl::narrow_cast(directions_.size()) == num_scan_inputs_, + ORT_ENFORCE(gsl::narrow_cast(directions_.size()) == num_scan_inputs_, "Number of entries in 'directions' was ", directions_.size(), ". Must match 'num_scan_inputs' of ", num_scan_inputs_); - ONNXRUNTIME_ENFORCE(std::all_of(directions_.cbegin(), directions_.cend(), + ORT_ENFORCE(std::all_of(directions_.cbegin(), directions_.cend(), [](int64_t i) { return i == static_cast(Direction::kForward) || i == static_cast(Direction::kReverse); }), "Invalid values in 'directions'. 0 == forward. 1 == reverse."); diff --git a/onnxruntime/core/providers/cpu/cpu_execution_provider.h b/onnxruntime/core/providers/cpu/cpu_execution_provider.h index 5995282bf5de7..02b890ad01d7d 100644 --- a/onnxruntime/core/providers/cpu/cpu_execution_provider.h +++ b/onnxruntime/core/providers/cpu/cpu_execution_provider.h @@ -25,10 +25,10 @@ using FuseRuleFn = std::function(); }, std::numeric_limits::max()}); #ifdef USE_JEMALLOC - ONNXRUNTIME_UNUSED_PARAMETER(info); + ORT_UNUSED_PARAMETER(info); //JEMalloc already has memory pool, so just use device allocator. InsertAllocator( std::shared_ptr( @@ -53,11 +53,11 @@ class CPUExecutionProvider : public IExecutionProvider { ///requires src.buffer_deleter_ == nullptr Status CopyTensor(const Tensor& src, Tensor& dst) const override { - ONNXRUNTIME_ENFORCE(strcmp(dst.Location().name, CPU) == 0); + ORT_ENFORCE(strcmp(dst.Location().name, CPU) == 0); // Todo: support copy with different devices. if (strcmp(src.Location().name, CPU) != 0) { - ONNXRUNTIME_NOT_IMPLEMENTED("copy from ", src.Location().name, " is not implemented"); + ORT_NOT_IMPLEMENTED("copy from ", src.Location().name, " is not implemented"); } // no really copy needed if is copy to cpu. 
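The two macros that dominate this rename, ORT_RETURN_IF_ERROR and ORT_ENFORCE, follow the usual status-propagation and invariant-check idioms seen throughout the hunks above. A rough sketch of those semantics follows, for orientation only: the real definitions in the ORT common headers add file/line context and variadic message formatting, and ORT_ENFORCE throws the project's own exception type rather than the std::runtime_error used here.

#include <stdexcept>

// Sketch only: evaluate an expression returning a Status and
// propagate it to the caller if it is not OK.
#define ORT_RETURN_IF_ERROR(expr)            \
  do {                                       \
    auto _status = (expr);                   \
    if (!_status.IsOK()) return _status;     \
  } while (0)

// Sketch only: throw when a required condition does not hold
// (message arguments accepted but not formatted in this sketch).
#define ORT_ENFORCE(condition, ...)                                            \
  do {                                                                         \
    if (!(condition))                                                          \
      throw std::runtime_error("ORT_ENFORCE failed: " #condition);             \
  } while (0)

Both are pure textual renames in this change; call sites such as ORT_RETURN_IF_ERROR(graph.Resolve()) keep their behavior and only gain the shorter ORT_ prefix.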
diff --git a/onnxruntime/core/providers/cpu/cpu_provider_factory.cc b/onnxruntime/core/providers/cpu/cpu_provider_factory.cc index 2ce0e5a9b997e..98813317834a1 100644 --- a/onnxruntime/core/providers/cpu/cpu_provider_factory.cc +++ b/onnxruntime/core/providers/cpu/cpu_provider_factory.cc @@ -9,35 +9,35 @@ using namespace onnxruntime; namespace { struct CpuProviderFactory { - const ONNXRuntimeProviderFactoryInterface* const cls; + const OrtProviderFactoryInterface* const cls; std::atomic_int ref_count; bool create_arena; CpuProviderFactory(); }; -ONNXStatus* ONNXRUNTIME_API_CALL CreateCpu(void* this_, ONNXRuntimeProvider** out) { +ONNXStatus* ORT_API_CALL CreateCpu(void* this_, OrtProvider** out) { CPUExecutionProviderInfo info; CpuProviderFactory* this_ptr = (CpuProviderFactory*)this_; info.create_arena = this_ptr->create_arena; CPUExecutionProvider* ret = new CPUExecutionProvider(info); - *out = (ONNXRuntimeProvider*)ret; + *out = (OrtProvider*)ret; return nullptr; } -uint32_t ONNXRUNTIME_API_CALL ReleaseCpu(void* this_) { +uint32_t ORT_API_CALL ReleaseCpu(void* this_) { CpuProviderFactory* this_ptr = (CpuProviderFactory*)this_; if (--this_ptr->ref_count == 0) delete this_ptr; return 0; } -uint32_t ONNXRUNTIME_API_CALL AddRefCpu(void* this_) { +uint32_t ORT_API_CALL AddRefCpu(void* this_) { CpuProviderFactory* this_ptr = (CpuProviderFactory*)this_; ++this_ptr->ref_count; return 0; } -constexpr ONNXRuntimeProviderFactoryInterface cpu_cls = { +constexpr OrtProviderFactoryInterface cpu_cls = { {AddRefCpu, ReleaseCpu}, CreateCpu, @@ -46,13 +46,13 @@ constexpr ONNXRuntimeProviderFactoryInterface cpu_cls = { CpuProviderFactory::CpuProviderFactory() : cls(&cpu_cls), ref_count(1), create_arena(true) {} } // namespace -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateCpuExecutionProviderFactory, int use_arena, _Out_ ONNXRuntimeProviderFactoryInterface*** out) { +ORT_API_STATUS_IMPL(OrtCreateCpuExecutionProviderFactory, int use_arena, _Out_ OrtProviderFactoryInterface*** out) { CpuProviderFactory* ret = new CpuProviderFactory(); ret->create_arena = (use_arena != 0); - *out = (ONNXRuntimeProviderFactoryInterface**)ret; + *out = (OrtProviderFactoryInterface**)ret; return nullptr; } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateCpuAllocatorInfo, enum ONNXRuntimeAllocatorType type, enum ONNXRuntimeMemType mem_type, _Out_ ONNXRuntimeAllocatorInfo** out) { - return ONNXRuntimeCreateAllocatorInfo(onnxruntime::CPU, type, 0, mem_type, out); +ORT_API_STATUS_IMPL(OrtCreateCpuAllocatorInfo, enum OrtAllocatorType type, enum OrtMemType mem_type, _Out_ OrtAllocatorInfo** out) { + return OrtCreateAllocatorInfo(onnxruntime::CPU, type, 0, mem_type, out); } diff --git a/onnxruntime/core/providers/cpu/generator/constant.cc b/onnxruntime/core/providers/cpu/generator/constant.cc index 72ecb86d05f02..db8f95b463e78 100644 --- a/onnxruntime/core/providers/cpu/generator/constant.cc +++ b/onnxruntime/core/providers/cpu/generator/constant.cc @@ -83,9 +83,9 @@ static Status GenerateConstantOutput(Tensor& Y, TensorProto::DataType dtype, flo GenerateData(Y, value); break; case TensorProto::FLOAT16: - ONNXRUNTIME_NOT_IMPLEMENTED("FLOAT16 is not supported"); + ORT_NOT_IMPLEMENTED("FLOAT16 is not supported"); default: - ONNXRUNTIME_THROW("Unsupported data type of ", dtype); + ORT_THROW("Unsupported data type of ", dtype); } return Status::OK(); @@ -102,7 +102,7 @@ Status ConstantLike::Compute(OpKernelContext* ctx) const { dtype = utils::GetTensorProtoType(*X); } } else { - ONNXRUNTIME_ENFORCE(!shape_.empty(), "Neither Input tensor is 
not null nor shape attribute exists"); + ORT_ENFORCE(!shape_.empty(), "Neither Input tensor is not null nor shape attribute exists"); Y = ctx->Output(0, TensorShape(shape_)); } diff --git a/onnxruntime/core/providers/cpu/generator/constant.h b/onnxruntime/core/providers/cpu/generator/constant.h index 1558e02fba971..1533822d0c4da 100644 --- a/onnxruntime/core/providers/cpu/generator/constant.h +++ b/onnxruntime/core/providers/cpu/generator/constant.h @@ -16,7 +16,7 @@ class ConstantLike final : public OpKernel { dtype_ = static_cast(dtype); shape_ = info.GetAttrsOrDefault("shape"); - ONNXRUNTIME_ENFORCE(info.GetAttr("value", &value_).IsOK()); + ORT_ENFORCE(info.GetAttr("value", &value_).IsOK()); } Status Compute(OpKernelContext* ctx) const override; diff --git a/onnxruntime/core/providers/cpu/generator/random.cc b/onnxruntime/core/providers/cpu/generator/random.cc index 5331d8c8f5e7a..f6bd1770b1ed0 100644 --- a/onnxruntime/core/providers/cpu/generator/random.cc +++ b/onnxruntime/core/providers/cpu/generator/random.cc @@ -104,12 +104,12 @@ Status RandomNormalLike::Compute(OpKernelContext* ctx) const { Tensor* Y = nullptr; auto status = CreateOutputTensorFromTensorShape(ctx, X, &Y); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); auto dtype = dtype_ != TensorProto_DataType_UNDEFINED ? dtype_ : InferDataType(X); if (dtype == TensorProto_DataType_UNDEFINED) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Could not infer data type from input tensor with data type ", X.DataType()); @@ -125,12 +125,12 @@ Status RandomUniformLike::Compute(OpKernelContext* ctx) const { Tensor* Y = nullptr; auto status = CreateOutputTensorFromTensorShape(ctx, X, &Y); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); auto dtype = dtype_ != TensorProto_DataType_UNDEFINED ? dtype_ : InferDataType(X); if (dtype == TensorProto_DataType_UNDEFINED) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Could not infer data type from input tensor with data type ", X.DataType()); status = RandomUniformCompute(low_, high_, generator_, dtype, *Y); @@ -251,7 +251,7 @@ Status Multinomial::Compute(OpKernelContext* ctx) const { break; } default: - status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Invalid data type of ", output_dtype_); + status = ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Invalid data type of ", output_dtype_); } return status; @@ -268,7 +268,7 @@ static Status CreateOutputTensorFromTensorValues(OpKernelContext* ctx, const Ten auto num_dims = shape.NumDimensions(); if (num_dims != 1) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Expected 1 dimension tensor with shape information. Dimensions=", num_dims); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Expected 1 dimension tensor with shape information. 
Dimensions=", num_dims); } std::vector dims; @@ -317,7 +317,7 @@ static Status RandomNormalCompute(float mean, float scale, break; } case TensorProto::FLOAT16: { - ONNXRUNTIME_NOT_IMPLEMENTED("FLOAT16 is not supported"); + ORT_NOT_IMPLEMENTED("FLOAT16 is not supported"); } case TensorProto::DOUBLE: { GenerateData>( @@ -325,7 +325,7 @@ static Status RandomNormalCompute(float mean, float scale, break; } default: - ONNXRUNTIME_THROW("Invalid data type of ", dtype); + ORT_THROW("Invalid data type of ", dtype); } return Status::OK(); @@ -342,7 +342,7 @@ static Status RandomUniformCompute(float low, float high, break; } case TensorProto::FLOAT16: { - ONNXRUNTIME_NOT_IMPLEMENTED("FLOAT16 is not supported"); + ORT_NOT_IMPLEMENTED("FLOAT16 is not supported"); } case TensorProto::DOUBLE: { GenerateData>( @@ -350,7 +350,7 @@ static Status RandomUniformCompute(float low, float high, break; } default: - ONNXRUNTIME_THROW("Invalid data type of ", dtype); + ORT_THROW("Invalid data type of ", dtype); } return Status::OK(); diff --git a/onnxruntime/core/providers/cpu/generator/random.h b/onnxruntime/core/providers/cpu/generator/random.h index fb8bdcef7b092..a0082aea73938 100644 --- a/onnxruntime/core/providers/cpu/generator/random.h +++ b/onnxruntime/core/providers/cpu/generator/random.h @@ -14,8 +14,8 @@ namespace onnxruntime { class RandomNormal final : public OpKernel { public: RandomNormal(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("mean", &mean_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); + ORT_ENFORCE(info.GetAttr("mean", &mean_).IsOK()); + ORT_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); // read optional seed attribute and generate if not provided float seed = 0.f; @@ -26,13 +26,13 @@ class RandomNormal final : public OpKernel { generator_ = std::default_random_engine{gsl::narrow_cast(seed)}; int64_t dtype; - ONNXRUNTIME_ENFORCE(info.GetAttr("dtype", &dtype).IsOK()); + ORT_ENFORCE(info.GetAttr("dtype", &dtype).IsOK()); dtype_ = static_cast(dtype); - ONNXRUNTIME_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(dtype_) && dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, + ORT_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(dtype_) && dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, "Invalid dtype of ", dtype_); std::vector shape; - ONNXRUNTIME_ENFORCE(info.GetAttrs("shape", shape).IsOK()); + ORT_ENFORCE(info.GetAttrs("shape", shape).IsOK()); shape_ = TensorShape(shape); } @@ -49,8 +49,8 @@ class RandomNormal final : public OpKernel { class RandomNormalLike final : public OpKernel { public: RandomNormalLike(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("mean", &mean_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); + ORT_ENFORCE(info.GetAttr("mean", &mean_).IsOK()); + ORT_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); // read optional seed attribute and generate if not provided float seed = 0.f; @@ -63,7 +63,7 @@ class RandomNormalLike final : public OpKernel { int64_t dtype; if (info.GetAttr("dtype", &dtype).IsOK()) { dtype_ = static_cast(dtype); - ONNXRUNTIME_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(dtype_) && dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, + ORT_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(dtype_) && dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, "Invalid dtype of ", dtype_); } } @@ -80,8 +80,8 @@ class RandomNormalLike final : public OpKernel { class RandomUniform final : public OpKernel { public: RandomUniform(const 
OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("high", &high_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("low", &low_).IsOK()); + ORT_ENFORCE(info.GetAttr("high", &high_).IsOK()); + ORT_ENFORCE(info.GetAttr("low", &low_).IsOK()); // read optional seed attribute and generate if not provided float seed = 0.f; @@ -92,13 +92,13 @@ class RandomUniform final : public OpKernel { generator_ = std::default_random_engine{gsl::narrow_cast(seed)}; int64_t dtype; - ONNXRUNTIME_ENFORCE(info.GetAttr("dtype", &dtype).IsOK()); + ORT_ENFORCE(info.GetAttr("dtype", &dtype).IsOK()); dtype_ = static_cast(dtype); - ONNXRUNTIME_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(dtype_) && dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, + ORT_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(dtype_) && dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, "Invalid dtype of ", dtype_); std::vector shape; - ONNXRUNTIME_ENFORCE(info.GetAttrs("shape", shape).IsOK()); + ORT_ENFORCE(info.GetAttrs("shape", shape).IsOK()); shape_ = TensorShape(shape); } @@ -115,8 +115,8 @@ class RandomUniform final : public OpKernel { class RandomUniformLike final : public OpKernel { public: RandomUniformLike(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("high", &high_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("low", &low_).IsOK()); + ORT_ENFORCE(info.GetAttr("high", &high_).IsOK()); + ORT_ENFORCE(info.GetAttr("low", &low_).IsOK()); // read optional seed attribute and generate if not provided float seed = 0.f; if (!info.GetAttr("seed", &seed).IsOK()) { @@ -128,7 +128,7 @@ class RandomUniformLike final : public OpKernel { int64_t dtype; if (info.GetAttr("dtype", &dtype).IsOK()) { dtype_ = static_cast(dtype); - ONNXRUNTIME_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(dtype_) && dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, + ORT_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(dtype_) && dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, "Invalid dtype of ", dtype_); } } @@ -145,7 +145,7 @@ class RandomUniformLike final : public OpKernel { class Multinomial final : public OpKernel { public: Multinomial(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("sample_size", &num_samples_).IsOK()); + ORT_ENFORCE(info.GetAttr("sample_size", &num_samples_).IsOK()); float seed = 0.f; if (!info.GetAttr("seed", &seed).IsOK()) { @@ -160,7 +160,7 @@ class Multinomial final : public OpKernel { } else { output_dtype_ = static_cast(output_dtype_tmp); } - ONNXRUNTIME_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(output_dtype_) && output_dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, + ORT_ENFORCE(ONNX_NAMESPACE::TensorProto::DataType_IsValid(output_dtype_) && output_dtype_ != ONNX_NAMESPACE::TensorProto::UNDEFINED, "Invalid dtype of ", output_dtype_); } diff --git a/onnxruntime/core/providers/cpu/math/clip.h b/onnxruntime/core/providers/cpu/math/clip.h index ea1feb3dc3e4d..ff1b2ba238bf6 100644 --- a/onnxruntime/core/providers/cpu/math/clip.h +++ b/onnxruntime/core/providers/cpu/math/clip.h @@ -13,8 +13,8 @@ template class Clip final : public OpKernel { public: Clip(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("max", &max_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("min", &min_).IsOK()); + ORT_ENFORCE(info.GetAttr("max", &max_).IsOK()); + ORT_ENFORCE(info.GetAttr("min", &min_).IsOK()); } Status Compute(OpKernelContext* ctx) const override { diff --git 
a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc index ab05ed82be80c..f2063f5736a24 100644 --- a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc +++ b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc @@ -439,7 +439,7 @@ Status Log::Compute(OpKernelContext* ctx) const { template <> Status Sum_6::Compute(OpKernelContext* ctx) const { auto input_count = Node().InputArgCount().front(); - ONNXRUNTIME_ENFORCE(input_count >= 1, "Must have 1 or more inputs"); + ORT_ENFORCE(input_count >= 1, "Must have 1 or more inputs"); auto& data_0 = *ctx->Input(0); auto& shape = data_0.Shape(); auto sum = EigenMap(*ctx->Output(0, shape)); @@ -448,12 +448,12 @@ Status Sum_6::Compute(OpKernelContext* ctx) const { sum = EigenMap(data_0); } else { auto& data_1 = *ctx->Input(1); - ONNXRUNTIME_ENFORCE(data_1.Shape() == shape, "All inputs must have the same shape"); + ORT_ENFORCE(data_1.Shape() == shape, "All inputs must have the same shape"); sum = EigenMap(data_0) + EigenMap(data_1); for (int index = 2; index < input_count; index++) { auto& data_n = *ctx->Input(index); - ONNXRUNTIME_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape"); + ORT_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape"); sum += EigenMap(data_n); } } @@ -473,7 +473,7 @@ Status Sum_8::Compute(OpKernelContext* context) const { template <> Status Min_6::Compute(OpKernelContext* ctx) const { auto inputCount = Node().InputArgCount().front(); - ONNXRUNTIME_ENFORCE(inputCount >= 1, "Must have 1 or more inputs"); + ORT_ENFORCE(inputCount >= 1, "Must have 1 or more inputs"); auto& data_0 = *ctx->Input(0); auto& shape = data_0.Shape(); auto min = EigenMap(*ctx->Output(0, shape)); @@ -481,7 +481,7 @@ Status Min_6::Compute(OpKernelContext* ctx) const { min = EigenMap(data_0); for (int index = 1; index < inputCount; index++) { auto& data_n = *ctx->Input(index); - ONNXRUNTIME_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape"); + ORT_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape"); min = min.array().min(EigenMap(data_n).array()); } @@ -500,7 +500,7 @@ Status Min_8::Compute(OpKernelContext* context) const { template <> Status Max_6::Compute(OpKernelContext* ctx) const { auto inputCount = Node().InputArgCount().front(); - ONNXRUNTIME_ENFORCE(inputCount >= 1, "Must have 1 or more inputs"); + ORT_ENFORCE(inputCount >= 1, "Must have 1 or more inputs"); auto& data_0 = *ctx->Input(0); auto& shape = data_0.Shape(); auto max = EigenMap(*ctx->Output(0, shape)); @@ -508,7 +508,7 @@ Status Max_6::Compute(OpKernelContext* ctx) const { max = EigenMap(data_0); for (int index = 1; index < inputCount; index++) { auto& data_n = *ctx->Input(index); - ONNXRUNTIME_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape"); + ORT_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape"); max = max.array().max(EigenMap(data_n).array()); } @@ -619,7 +619,7 @@ Status Greater::Compute(OpKernelContext* context) const { template <> Status Mean_6::Compute(OpKernelContext* ctx) const { auto inputCount = Node().InputArgCount().front(); - ONNXRUNTIME_ENFORCE(inputCount >= 1, "Must have 1 or more inputs"); + ORT_ENFORCE(inputCount >= 1, "Must have 1 or more inputs"); auto& data_0 = *ctx->Input(0); auto& shape = data_0.Shape(); auto mean = EigenMap(*ctx->Output(0, shape)); @@ -628,12 +628,12 @@ Status Mean_6::Compute(OpKernelContext* ctx) const { mean = EigenMap(data_0); } else { auto& data_1 = 
*ctx->Input(1); - ONNXRUNTIME_ENFORCE(data_1.Shape() == shape, "All inputs must have the same shape"); + ORT_ENFORCE(data_1.Shape() == shape, "All inputs must have the same shape"); mean = EigenMap(data_0) + EigenMap(data_1); for (int index = 2; index < inputCount; index++) { auto& data_n = *ctx->Input(index); - ONNXRUNTIME_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape"); + ORT_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape"); mean += EigenMap(data_n); } } @@ -844,7 +844,7 @@ struct TBroadcasterExpand { template Status Expand_8::Compute(OpKernelContext* context) const { auto& tensor_shape = *context->Input(1); - ONNXRUNTIME_ENFORCE(tensor_shape.Shape().GetDims().size() == 1, "Shape must be 1 dimensional as it's tensor data is a shape"); + ORT_ENFORCE(tensor_shape.Shape().GetDims().size() == 1, "Shape must be 1 dimensional as it's tensor data is a shape"); // Turn the shape tensor data into an actual shape const int64_t* p_shape = tensor_shape.template Data(); @@ -884,7 +884,7 @@ Status Scale::Compute(OpKernelContext* ctx) const { template <> Status Erf::Compute(OpKernelContext* context) const { auto X_ptr = context->Input(0); - ONNXRUNTIME_ENFORCE(X_ptr != nullptr); + ORT_ENFORCE(X_ptr != nullptr); auto& X = *X_ptr; auto& Y = *context->Output(0, X.Shape()); diff --git a/onnxruntime/core/providers/cpu/math/element_wise_ops.h b/onnxruntime/core/providers/cpu/math/element_wise_ops.h index a5ce0c28c86eb..f36974dec9ec2 100644 --- a/onnxruntime/core/providers/cpu/math/element_wise_ops.h +++ b/onnxruntime/core/providers/cpu/math/element_wise_ops.h @@ -274,8 +274,8 @@ class Affine final : public OpKernel { public: Affine(const OpKernelInfo& info) : OpKernel(info) { // Either model-supplied or default values should be returned for alpha and beta - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); } Status Compute(OpKernelContext* context) const override; @@ -308,7 +308,7 @@ template class Scale final : public OpKernel { public: Scale(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); + ORT_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); } Status Compute(OpKernelContext* context) const override; @@ -350,7 +350,7 @@ struct BroadcastIterator { } void Init(int64_t axis, int64_t largest) { - ONNXRUNTIME_ENFORCE(axis == 1 || axis == largest, "Attempting to broadcast an axis by a dimension other than 1. ", axis, " by ", largest); + ORT_ENFORCE(axis == 1 || axis == largest, "Attempting to broadcast an axis by a dimension other than 1. ", axis, " by ", largest); deltas_.push_back(axis > 1); counts_.push_back(largest); @@ -358,7 +358,7 @@ struct BroadcastIterator { } void Append(int64_t axis, int64_t largest) { - ONNXRUNTIME_ENFORCE(axis == 1 || axis == largest, "Attempting to broadcast an axis by a dimension other than 1. ", axis, " by ", largest); + ORT_ENFORCE(axis == 1 || axis == largest, "Attempting to broadcast an axis by a dimension other than 1. 
", axis, " by ", largest); // If we're greater than 1, it doesn't matter what the other tensor does if (axis > 1) { @@ -552,7 +552,7 @@ struct TBroadcastOutput { template struct TensorAllocator { TensorAllocator(OpKernelContext& context) { - ONNXRUNTIME_ENFORCE(context.GetTempSpaceAllocator(&allocator_).IsOK()); + ORT_ENFORCE(context.GetTempSpaceAllocator(&allocator_).IsOK()); } std::unique_ptr Allocate(const TensorShape& shape) { @@ -597,7 +597,7 @@ Status BroadcastTwo(OpKernelContext& context, Input0Scalar input0scalar, Input1S template Status BroadcastVariadic(const Node& node, OpKernelContext& context, Input0Scalar input0scalar, Input1Scalar input1scalar, General general) { auto input_count = node.InputArgCount().front(); - ONNXRUNTIME_ENFORCE(input_count >= 1, "Must have 1 or more inputs"); + ORT_ENFORCE(input_count >= 1, "Must have 1 or more inputs"); // One item is trivial, just copy across and exit if (input_count == 1) { diff --git a/onnxruntime/core/providers/cpu/math/gemm.h b/onnxruntime/core/providers/cpu/math/gemm.h index b995f95a82609..0dda31cf6ecb0 100644 --- a/onnxruntime/core/providers/cpu/math/gemm.h +++ b/onnxruntime/core/providers/cpu/math/gemm.h @@ -19,14 +19,14 @@ class Gemm final : public OpKernel { public: Gemm(const OpKernelInfo& info) : OpKernel(info) { int64_t temp; - ONNXRUNTIME_ENFORCE(info.GetAttr("transA", &temp).IsOK()); + ORT_ENFORCE(info.GetAttr("transA", &temp).IsOK()); trans_A_ = temp == 0 ? CblasNoTrans : CblasTrans; - ONNXRUNTIME_ENFORCE(info.GetAttr("transB", &temp).IsOK()); + ORT_ENFORCE(info.GetAttr("transB", &temp).IsOK()); trans_B_ = temp == 0 ? CblasNoTrans : CblasTrans; - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); } Status Compute(OpKernelContext* context) const override { diff --git a/onnxruntime/core/providers/cpu/math/gemm_helper.h b/onnxruntime/core/providers/cpu/math/gemm_helper.h index 14d259eb4bb25..a0b23f7ccc4ea 100644 --- a/onnxruntime/core/providers/cpu/math/gemm_helper.h +++ b/onnxruntime/core/providers/cpu/math/gemm_helper.h @@ -10,8 +10,8 @@ class GemmHelper { public: GemmHelper(const TensorShape& left, bool trans_left, const TensorShape& right, bool trans_right, const TensorShape& bias) { //dimension check - ONNXRUNTIME_ENFORCE(left.NumDimensions() == 2); - ONNXRUNTIME_ENFORCE(right.NumDimensions() == 2); + ORT_ENFORCE(left.NumDimensions() == 2); + ORT_ENFORCE(right.NumDimensions() == 2); if (trans_left) { M_ = left[1]; @@ -31,7 +31,7 @@ class GemmHelper { } if (right[k_dim] != K_) - status_ = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + status_ = ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "GEMM: Dimension mismatch, W: ", right.ToString(), " K: " + std::to_string(K_), @@ -40,7 +40,7 @@ class GemmHelper { if (!IsValidBroadcast(bias, M_, N_)) status_ = common::Status(common::ONNXRUNTIME, common::INVALID_ARGUMENT, "Gemm: Invalid bias shape for broadcast"); - ONNXRUNTIME_ENFORCE(M_ > 0 && N_ > 0 && K_ > 0); + ORT_ENFORCE(M_ > 0 && N_ > 0 && K_ > 0); } int64_t M() const { return M_; } diff --git a/onnxruntime/core/providers/cpu/math/hardmax.h b/onnxruntime/core/providers/cpu/math/hardmax.h index 23c7bb6b36920..745973b8dbaa2 100644 --- a/onnxruntime/core/providers/cpu/math/hardmax.h +++ b/onnxruntime/core/providers/cpu/math/hardmax.h @@ -21,7 +21,7 @@ class Hardmax final : public OpKernel { } // if value was provided, make sure it was valid - 
ONNXRUNTIME_ENFORCE(axis_ >= 0, "Invalid axis provided."); + ORT_ENFORCE(axis_ >= 0, "Invalid axis provided."); } Status Compute(OpKernelContext* context) const override; diff --git a/onnxruntime/core/providers/cpu/math/matmul.cc b/onnxruntime/core/providers/cpu/math/matmul.cc index 68ae225320821..b66f302656695 100644 --- a/onnxruntime/core/providers/cpu/math/matmul.cc +++ b/onnxruntime/core/providers/cpu/math/matmul.cc @@ -22,7 +22,7 @@ Status MatMul::Compute(OpKernelContext* ctx) const { const Tensor* right_X = ctx->Input(1); MatMulComputeHelper helper; - ONNXRUNTIME_RETURN_IF_ERROR(helper.Compute(left_X->Shape(), right_X->Shape())); + ORT_RETURN_IF_ERROR(helper.Compute(left_X->Shape(), right_X->Shape())); Tensor* Y = ctx->Output(0, helper.OutputShape()); diff --git a/onnxruntime/core/providers/cpu/math/matmul_helper.h b/onnxruntime/core/providers/cpu/math/matmul_helper.h index 994b621998cf3..ab3a3b8a5a288 100644 --- a/onnxruntime/core/providers/cpu/math/matmul_helper.h +++ b/onnxruntime/core/providers/cpu/math/matmul_helper.h @@ -20,7 +20,7 @@ class MatMulComputeHelper { size_t left_num_dims = left_shape.NumDimensions(); size_t right_num_dims = right_shape.NumDimensions(); - ONNXRUNTIME_RETURN_IF_NOT(left_num_dims >= 1 && right_num_dims >= 1); + ORT_RETURN_IF_NOT(left_num_dims >= 1 && right_num_dims >= 1); // special case for right_shape being 2D and left_shape > 2D by flattening left_shape to 2D // note that padding 1s in front of the right shape can be flattened too @@ -35,7 +35,7 @@ class MatMulComputeHelper { output_offsets_ = {0}; left_offsets_ = {0}; right_offsets_ = {0}; - ONNXRUNTIME_RETURN_IF_NOT(K_ == right_shape[right_num_dims - 2], "MatMul dimension mismatch"); + ORT_RETURN_IF_NOT(K_ == right_shape[right_num_dims - 2], "MatMul dimension mismatch"); return Status::OK(); } @@ -78,9 +78,9 @@ class MatMulComputeHelper { for (int idx_dim = 0; idx_dim < num_dims_with_pad - 2; ++idx_dim) { output_dims[idx_dim] = std::max(left_padded_dims_[idx_dim], right_padded_dims_[idx_dim]); if (left_padded_dims_[idx_dim] != output_dims[idx_dim]) - ONNXRUNTIME_RETURN_IF_NOT(left_padded_dims_[idx_dim] == 1, "left operand cannot broadcast on dim ", idx_dim); + ORT_RETURN_IF_NOT(left_padded_dims_[idx_dim] == 1, "left operand cannot broadcast on dim ", idx_dim); if (right_padded_dims_[idx_dim] != output_dims[idx_dim]) - ONNXRUNTIME_RETURN_IF_NOT(right_padded_dims_[idx_dim] == 1, "right operand cannot broadcast on dim ", idx_dim); + ORT_RETURN_IF_NOT(right_padded_dims_[idx_dim] == 1, "right operand cannot broadcast on dim ", idx_dim); } M_ = has_1D_input ? 1 : left_shape[left_num_dims - 2]; @@ -88,24 +88,24 @@ class MatMulComputeHelper { N_ = (right_num_dims == 1) ? 
1 : right_shape[right_num_dims - 1]; if (!has_1D_input) { - ONNXRUNTIME_RETURN_IF_NOT(K_ == right_shape[right_num_dims - 2], "MatMul dimension mismatch"); + ORT_RETURN_IF_NOT(K_ == right_shape[right_num_dims - 2], "MatMul dimension mismatch"); // left (...M x K), right (...K x N), output (...M x N) - ONNXRUNTIME_RETURN_IF_NOT(num_dims_with_pad == num_output_dims); + ORT_RETURN_IF_NOT(num_dims_with_pad == num_output_dims); output_dims[num_output_dims - 2] = M_; output_dims[num_output_dims - 1] = N_; } else { if (num_output_dims == 0) { // for left and right being both vector, output is scalar thus no shape - ONNXRUNTIME_RETURN_IF_NOT(M_ == 1 && N_ == 1); + ORT_RETURN_IF_NOT(M_ == 1 && N_ == 1); } else { if (left_num_dims == 1) { - ONNXRUNTIME_RETURN_IF_NOT(num_dims_with_pad - 1 == num_output_dims); - ONNXRUNTIME_RETURN_IF_NOT(K_ == right_shape[right_num_dims - 2], "MatMul dimension mismatch"); + ORT_RETURN_IF_NOT(num_dims_with_pad - 1 == num_output_dims); + ORT_RETURN_IF_NOT(K_ == right_shape[right_num_dims - 2], "MatMul dimension mismatch"); // left (K), right (...K,N), output (...N) output_dims[num_output_dims - 1] = N_; } else { - ONNXRUNTIME_RETURN_IF_NOT(num_dims_with_pad - 2 == num_output_dims); - ONNXRUNTIME_RETURN_IF_NOT(K_ == right_shape[0], "MatMul dimension mismatch"); + ORT_RETURN_IF_NOT(num_dims_with_pad - 2 == num_output_dims); + ORT_RETURN_IF_NOT(K_ == right_shape[0], "MatMul dimension mismatch"); // left(...K), right (K), output (...), already assigned } } @@ -239,7 +239,7 @@ class MatMulComputeHelper { template static void OffsetToArrays(T* p, const std::vector& offsets, gsl::span arrays) { auto len = offsets.size(); - ONNXRUNTIME_ENFORCE(arrays.size() == gsl::narrow_cast(len)); + ORT_ENFORCE(arrays.size() == gsl::narrow_cast(len)); for (size_t i = 0; i < len; i++) { arrays[i] = p + offsets[i]; } @@ -248,7 +248,7 @@ class MatMulComputeHelper { template static void OffsetToArrays(const T* p, const std::vector& offsets, gsl::span arrays) { auto len = offsets.size(); - ONNXRUNTIME_ENFORCE(arrays.size() == gsl::narrow_cast(len)); + ORT_ENFORCE(arrays.size() == gsl::narrow_cast(len)); for (size_t i = 0; i < len; i++) { arrays[i] = p + offsets[i]; } diff --git a/onnxruntime/core/providers/cpu/math/top_k.cc b/onnxruntime/core/providers/cpu/math/top_k.cc index 2bd5127523e74..1190ba07914b7 100644 --- a/onnxruntime/core/providers/cpu/math/top_k.cc +++ b/onnxruntime/core/providers/cpu/math/top_k.cc @@ -31,7 +31,7 @@ ONNX_CPU_OPERATOR_KERNEL( TopK); static int64_t SizeToDim(size_t k, const vector& dims) { - ONNXRUNTIME_ENFORCE(k <= dims.size()); + ORT_ENFORCE(k <= dims.size()); int64_t r = 1; for (size_t i = 0; i < k; ++i) { r *= dims[i]; diff --git a/onnxruntime/core/providers/cpu/math/top_k.h b/onnxruntime/core/providers/cpu/math/top_k.h index 80d14b5e7a1e0..0cc87b148bd59 100644 --- a/onnxruntime/core/providers/cpu/math/top_k.h +++ b/onnxruntime/core/providers/cpu/math/top_k.h @@ -14,12 +14,12 @@ class TopK final : public OpKernel { public: TopK(const OpKernelInfo& op_kernel_info) : OpKernel(op_kernel_info) { int64_t k_temp; - ONNXRUNTIME_ENFORCE(op_kernel_info.GetAttr("k", &k_temp).IsOK()); - ONNXRUNTIME_ENFORCE(k_temp > 0); + ORT_ENFORCE(op_kernel_info.GetAttr("k", &k_temp).IsOK()); + ORT_ENFORCE(k_temp > 0); k_ = gsl::narrow_cast(k_temp); int64_t axis_temp; - ONNXRUNTIME_ENFORCE(op_kernel_info.GetAttr("axis", &axis_temp).IsOK()); + ORT_ENFORCE(op_kernel_info.GetAttr("axis", &axis_temp).IsOK()); axis_ = gsl::narrow_cast(axis_temp); } diff --git 
a/onnxruntime/core/providers/cpu/ml/cast_map.cc b/onnxruntime/core/providers/cpu/ml/cast_map.cc index f7cfef462665d..489fd8171379c 100644 --- a/onnxruntime/core/providers/cpu/ml/cast_map.cc +++ b/onnxruntime/core/providers/cpu/ml/cast_map.cc @@ -64,7 +64,7 @@ Status CastMap::Compute(OpKernelContext* context) const { if (input_type == DataTypeImpl::GetType>()) { float_input = true; } else if (input_type != DataTypeImpl::GetType>()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input type of value: ", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input type of value: ", input_type, " Expected std::map or std::map"); } @@ -122,7 +122,7 @@ Status CastMap::ComputeImpl(OpKernelContext& context, TTo pad_value) const { auto out_end = out.end(); int64_t cur_idx = 0; - ONNXRUNTIME_ENFORCE(cur_input == end_input || cur_input->first >= 0, + ORT_ENFORCE(cur_input == end_input || cur_input->first >= 0, "Negative index values are not permitted. First entry in map has index value of ", cur_input->first); // for each output value, see if we have an input value, if not use the pad value diff --git a/onnxruntime/core/providers/cpu/ml/cast_map.h b/onnxruntime/core/providers/cpu/ml/cast_map.h index 008bf13fc92b8..9f1d8686efc27 100644 --- a/onnxruntime/core/providers/cpu/ml/cast_map.h +++ b/onnxruntime/core/providers/cpu/ml/cast_map.h @@ -15,16 +15,16 @@ class CastMap final : public OpKernel { CastMap(const OpKernelInfo& info) : OpKernel(info) { std::string attr; - ONNXRUNTIME_ENFORCE(info.GetAttr("cast_to", &attr).IsOK()); + ORT_ENFORCE(info.GetAttr("cast_to", &attr).IsOK()); cast_to_ = MakeCast(attr); - ONNXRUNTIME_ENFORCE(info.GetAttr("map_form", &attr).IsOK()); + ORT_ENFORCE(info.GetAttr("map_form", &attr).IsOK()); map_form_ = MakePack(attr); // ignore if not found as we fall back to the default of 1 - ONNXRUNTIME_ENFORCE(info.GetAttr("max_map", &max_map_).IsOK()); + ORT_ENFORCE(info.GetAttr("max_map", &max_map_).IsOK()); - ONNXRUNTIME_ENFORCE(map_form_ != PACK_MAP::SPARSE || max_map_ > 0, "max_map must be > 0 if map_form is SPARSE"); + ORT_ENFORCE(map_form_ != PACK_MAP::SPARSE || max_map_ > 0, "max_map must be > 0 if map_form is SPARSE"); } Status Compute(OpKernelContext* context) const override; diff --git a/onnxruntime/core/providers/cpu/ml/category_mapper.h b/onnxruntime/core/providers/cpu/ml/category_mapper.h index 4693668f4e6bd..62432a0ef00ff 100644 --- a/onnxruntime/core/providers/cpu/ml/category_mapper.h +++ b/onnxruntime/core/providers/cpu/ml/category_mapper.h @@ -16,15 +16,15 @@ class CategoryMapper final : public OpKernel { std::vector string_categories; std::vector int_categories; - ONNXRUNTIME_ENFORCE(info.GetAttrs("cats_strings", string_categories).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("cats_int64s", int_categories).IsOK()); + ORT_ENFORCE(info.GetAttrs("cats_strings", string_categories).IsOK()); + ORT_ENFORCE(info.GetAttrs("cats_int64s", int_categories).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("default_string", &default_string_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("default_int64", &default_int_).IsOK()); + ORT_ENFORCE(info.GetAttr("default_string", &default_string_).IsOK()); + ORT_ENFORCE(info.GetAttr("default_int64", &default_int_).IsOK()); auto num_entries = string_categories.size(); - ONNXRUNTIME_ENFORCE(num_entries == int_categories.size()); + ORT_ENFORCE(num_entries == int_categories.size()); string_to_int_map_.reserve(num_entries); int_to_string_map_.reserve(num_entries); diff --git 
a/onnxruntime/core/providers/cpu/ml/dictvectorizer.h b/onnxruntime/core/providers/cpu/ml/dictvectorizer.h
index 1a852b90d25b3..855f7aeafd8a4 100644
--- a/onnxruntime/core/providers/cpu/ml/dictvectorizer.h
+++ b/onnxruntime/core/providers/cpu/ml/dictvectorizer.h
@@ -15,7 +15,7 @@ class DictVectorizerOp final : public OpKernel {
  DictVectorizerOp(const OpKernelInfo& info) : OpKernel(info) {
    //In some stupid models, the vocabulary could have duplicated elements.
    //We must support that, otherwise some tests will break.
-    ONNXRUNTIME_ENFORCE(info.GetAttrs(std::is_same::value ? "string_vocabulary" : "int64_vocabulary", vocabulary_).IsOK());
+    ORT_ENFORCE(info.GetAttrs(std::is_same::value ? "string_vocabulary" : "int64_vocabulary", vocabulary_).IsOK());
  }
  common::Status Compute(OpKernelContext* ctx) const override {
    auto map = ctx->Input >(0);
diff --git a/onnxruntime/core/providers/cpu/ml/feature_vectorizer.cc b/onnxruntime/core/providers/cpu/ml/feature_vectorizer.cc
index 95f99487f2ebe..ae2e5aa5630b0 100644
--- a/onnxruntime/core/providers/cpu/ml/feature_vectorizer.cc
+++ b/onnxruntime/core/providers/cpu/ml/feature_vectorizer.cc
@@ -29,7 +29,7 @@ static void CopyWithCast(typename gsl::span::const_iterator begin,
 Status FeatureVectorizer::Compute(OpKernelContext* context) const {
   auto input_count = context->NumVariadicInputs(0);
-  ONNXRUNTIME_ENFORCE(input_count == input_dimensions_.size(),
+  ORT_ENFORCE(input_count == input_dimensions_.size(),
              "Number of inputs (", input_count, ") does not match number of inputdimensions values (",
              input_dimensions_.size(), ").");
@@ -55,7 +55,7 @@ Status FeatureVectorizer::Compute(OpKernelContext* context) const {
   // for each feature, write out its data in one pass
   for (int index = 0; index < input_count; ++index) {
     const Tensor* input_tensor_ptr = context->Input(index);
-    ONNXRUNTIME_ENFORCE(input_tensor_ptr != nullptr);
+    ORT_ENFORCE(input_tensor_ptr != nullptr);
     auto& input_tensor = *input_tensor_ptr;
     auto feature_size = input_dimensions_[index];
@@ -74,7 +74,7 @@ Status FeatureVectorizer::Compute(OpKernelContext* context) const {
       VectorizeTensor(input_tensor, feature_size, total_dimensions_, cur_out);
     } else {
       // should never happen.
graph validation should have failed - ONNXRUNTIME_THROW("Invalid input type:", data_type); + ORT_THROW("Invalid input type:", data_type); } // move to start of next feature diff --git a/onnxruntime/core/providers/cpu/ml/feature_vectorizer.h b/onnxruntime/core/providers/cpu/ml/feature_vectorizer.h index 6a5ce854daa62..b1b4b9a0f1d40 100644 --- a/onnxruntime/core/providers/cpu/ml/feature_vectorizer.h +++ b/onnxruntime/core/providers/cpu/ml/feature_vectorizer.h @@ -15,7 +15,7 @@ class FeatureVectorizer final : public OpKernel { public: FeatureVectorizer(const OpKernelInfo& info) : OpKernel(info) { auto status = info.GetAttrs("inputdimensions", input_dimensions_); - ONNXRUNTIME_ENFORCE(status.IsOK() && !input_dimensions_.empty(), "inputdimensions attribute must be provided"); + ORT_ENFORCE(status.IsOK() && !input_dimensions_.empty(), "inputdimensions attribute must be provided"); total_dimensions_ = std::accumulate(input_dimensions_.cbegin(), input_dimensions_.cend(), 0LL); } diff --git a/onnxruntime/core/providers/cpu/ml/imputer.cc b/onnxruntime/core/providers/cpu/ml/imputer.cc index aaadd4e69b07a..9b64f3d6d599c 100644 --- a/onnxruntime/core/providers/cpu/ml/imputer.cc +++ b/onnxruntime/core/providers/cpu/ml/imputer.cc @@ -57,10 +57,10 @@ ImputerOp::ImputerOp(const OpKernelInfo& info) : OpKernel(info), imputed_values_float_(info.GetAttrsOrDefault("imputed_value_floats")), imputed_values_int64_(info.GetAttrsOrDefault("imputed_value_int64s")) { if (imputed_values_float_.size() && !info.GetAttr("replaced_value_float", &replaced_value_float_).IsOK()) - ONNXRUNTIME_THROW("Expected 'replaced_value_float' attribute since 'imputed_value_floats' is specified"); + ORT_THROW("Expected 'replaced_value_float' attribute since 'imputed_value_floats' is specified"); if (imputed_values_int64_.size() && !info.GetAttr("replaced_value_int64", &replaced_value_int64_).IsOK()) - ONNXRUNTIME_THROW("Expected 'replace_value_int64' attribute since 'imputed_values_int64' is specified"); - ONNXRUNTIME_ENFORCE(imputed_values_float_.empty() ^ imputed_values_int64_.empty(), + ORT_THROW("Expected 'replace_value_int64' attribute since 'imputed_values_int64' is specified"); + ORT_ENFORCE(imputed_values_float_.empty() ^ imputed_values_int64_.empty(), "Must provide imputed_values_float_ or imputed_values_int64_ but not both."); } @@ -114,7 +114,7 @@ common::Status ComputeByType(OpKernelContext* context, common::Status ImputerOp::Compute(OpKernelContext* context) const { const Tensor* input_tensor_ptr = context->Input(0); - ONNXRUNTIME_ENFORCE(input_tensor_ptr != nullptr); + ORT_ENFORCE(input_tensor_ptr != nullptr); auto input_type = input_tensor_ptr->DataType(); if (input_type == DataTypeImpl::GetType()) { return ComputeByType(context, replaced_value_float_, imputed_values_float_); diff --git a/onnxruntime/core/providers/cpu/ml/label_encoder.h b/onnxruntime/core/providers/cpu/ml/label_encoder.h index 1855462a6ea97..597cf240c6ed4 100644 --- a/onnxruntime/core/providers/cpu/ml/label_encoder.h +++ b/onnxruntime/core/providers/cpu/ml/label_encoder.h @@ -15,10 +15,10 @@ class LabelEncoder final : public OpKernel { LabelEncoder(const OpKernelInfo& info) : OpKernel(info) { std::vector string_classes; - ONNXRUNTIME_ENFORCE(info.GetAttrs("classes_strings", string_classes).IsOK()); + ORT_ENFORCE(info.GetAttrs("classes_strings", string_classes).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("default_string", &default_string_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("default_int64", &default_int_).IsOK()); + 
ORT_ENFORCE(info.GetAttr("default_string", &default_string_).IsOK()); + ORT_ENFORCE(info.GetAttr("default_int64", &default_int_).IsOK()); auto num_entries = string_classes.size(); diff --git a/onnxruntime/core/providers/cpu/ml/linearclassifier.cc b/onnxruntime/core/providers/cpu/ml/linearclassifier.cc index 0beaf833ac8d2..70486c66d4f89 100644 --- a/onnxruntime/core/providers/cpu/ml/linearclassifier.cc +++ b/onnxruntime/core/providers/cpu/ml/linearclassifier.cc @@ -46,7 +46,7 @@ LinearClassifier::LinearClassifier(const OpKernelInfo& info) : OpKernel(info) classlabels_strings_(info.GetAttrsOrDefault("classlabels_strings")), classlabels_ints_(info.GetAttrsOrDefault("classlabels_ints")) { if (!info.GetAttrs("coefficients", coefficients_).IsOK()) - ONNXRUNTIME_ENFORCE(!coefficients_.empty()); + ORT_ENFORCE(!coefficients_.empty()); using_strings_ = !classlabels_strings_.empty(); class_count_ = static_cast(intercepts_.size()); diff --git a/onnxruntime/core/providers/cpu/ml/linearregressor.cc b/onnxruntime/core/providers/cpu/ml/linearregressor.cc index a3c97132170b6..37aa5deef11a0 100644 --- a/onnxruntime/core/providers/cpu/ml/linearregressor.cc +++ b/onnxruntime/core/providers/cpu/ml/linearregressor.cc @@ -16,8 +16,8 @@ template LinearRegressor::LinearRegressor(const OpKernelInfo& info) : OpKernel(info), intercepts_(info.GetAttrsOrDefault("intercepts")), post_transform_(MakeTransform(info.GetAttrOrDefault("post_transform", "NONE"))) { - ONNXRUNTIME_ENFORCE(info.GetAttr("targets", &targets_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("coefficients", coefficients_).IsOK()); + ORT_ENFORCE(info.GetAttr("targets", &targets_).IsOK()); + ORT_ENFORCE(info.GetAttrs("coefficients", coefficients_).IsOK()); } template <> diff --git a/onnxruntime/core/providers/cpu/ml/ml_common.h b/onnxruntime/core/providers/cpu/ml/ml_common.h index 4154982405c24..50c670d6e0ebf 100644 --- a/onnxruntime/core/providers/cpu/ml/ml_common.h +++ b/onnxruntime/core/providers/cpu/ml/ml_common.h @@ -98,7 +98,7 @@ static inline CAST_TO MakeCast(const std::string& input) { } else if (input == "TO_INT64") { return CAST_TO::TO_INT64; } else { - ONNXRUNTIME_THROW("Invalid CAST_TO value of ", input, " Expected TO_FLOAT, TO_STRING or TO_INT64"); + ORT_THROW("Invalid CAST_TO value of ", input, " Expected TO_FLOAT, TO_STRING or TO_INT64"); } } @@ -113,7 +113,7 @@ static inline PACK_MAP MakePack(const std::string& input) { } else if (input == "SPARSE") { return PACK_MAP::SPARSE; } else { - ONNXRUNTIME_THROW("Invalid PACK_MAP value of ", input, " Expected DENSE or SPARSE"); + ORT_THROW("Invalid PACK_MAP value of ", input, " Expected DENSE or SPARSE"); } } @@ -150,7 +150,7 @@ static inline NORMALIZE MakeNormalize(const std::string& input) { } else if (input == "L2") { return NORMALIZE::L2; } else { - ONNXRUNTIME_THROW("Invalid normalize value of ", input); + ORT_THROW("Invalid normalize value of ", input); } } diff --git a/onnxruntime/core/providers/cpu/ml/normalizer.cc b/onnxruntime/core/providers/cpu/ml/normalizer.cc index 211b61d1f7b52..cf557fc1a924e 100644 --- a/onnxruntime/core/providers/cpu/ml/normalizer.cc +++ b/onnxruntime/core/providers/cpu/ml/normalizer.cc @@ -44,7 +44,7 @@ ONNX_CPU_OPERATOR_ML_KERNEL( Status Normalizer::Compute(OpKernelContext* context) const { const Tensor* input_tensor_ptr = context->Input(0); - ONNXRUNTIME_ENFORCE(input_tensor_ptr != nullptr); + ORT_ENFORCE(input_tensor_ptr != nullptr); MLDataType input_type = input_tensor_ptr->DataType(); if (input_type == DataTypeImpl::GetType()) { @@ -56,7 +56,7 @@ Status 
Normalizer::Compute(OpKernelContext* context) const { } else if (input_type == DataTypeImpl::GetType()) { Normalize(context); } else { - ONNXRUNTIME_THROW("Invalid input type of ", input_type); + ORT_THROW("Invalid input type of ", input_type); } return Status::OK(); @@ -182,7 +182,7 @@ void Normalizer::Normalize(OpKernelContext* context) const { break; } default: { - ONNXRUNTIME_THROW("Unexpected NORMALIZE value of ", normalization_); + ORT_THROW("Unexpected NORMALIZE value of ", normalization_); } } } diff --git a/onnxruntime/core/providers/cpu/ml/normalizer.h b/onnxruntime/core/providers/cpu/ml/normalizer.h index ae8b633e9b351..b8c7eb627e234 100644 --- a/onnxruntime/core/providers/cpu/ml/normalizer.h +++ b/onnxruntime/core/providers/cpu/ml/normalizer.h @@ -16,7 +16,7 @@ class Normalizer final : public OpKernel { public: Normalizer(const OpKernelInfo& info) : OpKernel(info) { std::string norm; - ONNXRUNTIME_ENFORCE(info.GetAttr("norm", &norm).IsOK()); + ORT_ENFORCE(info.GetAttr("norm", &norm).IsOK()); normalization_ = MakeNormalize(norm); } diff --git a/onnxruntime/core/providers/cpu/ml/onehotencoder.cc b/onnxruntime/core/providers/cpu/ml/onehotencoder.cc index 1e986915d1a5d..427c3e5253097 100644 --- a/onnxruntime/core/providers/cpu/ml/onehotencoder.cc +++ b/onnxruntime/core/providers/cpu/ml/onehotencoder.cc @@ -52,7 +52,7 @@ template OneHotEncoderOp::OneHotEncoderOp(const OpKernelInfo& info) : OpKernel(info), zeros_(info.GetAttrOrDefault("zeros", 1)), num_categories_(0) { std::vector tmp_cats_int64s = info.GetAttrsOrDefault("cats_int64s"); std::vector tmp_cats_strings = info.GetAttrsOrDefault("cats_strings"); - ONNXRUNTIME_ENFORCE(tmp_cats_int64s.empty() || tmp_cats_strings.empty()); + ORT_ENFORCE(tmp_cats_int64s.empty() || tmp_cats_strings.empty()); if (!tmp_cats_int64s.empty()) { num_categories_ = tmp_cats_int64s.size(); for (size_t idx = 0, end = tmp_cats_int64s.size(); idx < end; ++idx) { @@ -64,14 +64,14 @@ OneHotEncoderOp::OneHotEncoderOp(const OpKernelInfo& info) : OpKernel(info), cats_strings_[tmp_cats_strings[idx]] = idx; } } - ONNXRUNTIME_ENFORCE(num_categories_ > 0); + ORT_ENFORCE(num_categories_ > 0); } template common::Status OneHotEncoderOp::Compute(OpKernelContext* context) const { const Tensor* X = context->Input(0); const TensorShape& input_shape = X->Shape(); - ONNXRUNTIME_ENFORCE(input_shape.NumDimensions() <= 2); + ORT_ENFORCE(input_shape.NumDimensions() <= 2); std::vector output_shape(input_shape.GetDims()); output_shape.push_back(num_categories_); @@ -96,7 +96,7 @@ template <> common::Status OneHotEncoderOp::Compute(OpKernelContext* context) const { const Tensor* X = context->Input(0); const TensorShape& input_shape = X->Shape(); - ONNXRUNTIME_ENFORCE(input_shape.NumDimensions() <= 2); + ORT_ENFORCE(input_shape.NumDimensions() <= 2); std::vector output_shape(input_shape.GetDims()); output_shape.push_back(num_categories_); diff --git a/onnxruntime/core/providers/cpu/ml/scaler.cc b/onnxruntime/core/providers/cpu/ml/scaler.cc index 5139de339b887..32cb102944b2c 100644 --- a/onnxruntime/core/providers/cpu/ml/scaler.cc +++ b/onnxruntime/core/providers/cpu/ml/scaler.cc @@ -64,8 +64,8 @@ template ScalerOp::ScalerOp(const OpKernelInfo& info) : OpKernel(info), scale_(info.GetAttrsOrDefault("scale")), offset_(info.GetAttrsOrDefault("offset")) { - ONNXRUNTIME_ENFORCE(!scale_.empty(), "Empty scale in attributes"); - ONNXRUNTIME_ENFORCE(scale_.size() == offset_.size(), + ORT_ENFORCE(!scale_.empty(), "Empty scale in attributes"); + ORT_ENFORCE(scale_.size() == offset_.size(), 
"Scale size: (" + std::to_string(scale_.size()) + ") != (" + std::to_string(offset_.size()) + ")"); } diff --git a/onnxruntime/core/providers/cpu/ml/svmclassifier.cc b/onnxruntime/core/providers/cpu/ml/svmclassifier.cc index a740d989ae078..7624d5c52cede 100644 --- a/onnxruntime/core/providers/cpu/ml/svmclassifier.cc +++ b/onnxruntime/core/providers/cpu/ml/svmclassifier.cc @@ -28,14 +28,14 @@ SVMClassifier::SVMClassifier(const OpKernelInfo& info) probb_(info.GetAttrsOrDefault("prob_b")), support_vectors_(info.GetAttrsOrDefault("support_vectors")), post_transform_(MakeTransform(info.GetAttrOrDefault("post_transform", "NONE"))) { - ONNXRUNTIME_ENFORCE(info.GetAttrs("rho", rho_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("coefficients", coefficients_).IsOK()); + ORT_ENFORCE(info.GetAttrs("rho", rho_).IsOK()); + ORT_ENFORCE(info.GetAttrs("coefficients", coefficients_).IsOK()); // prob_a and prob_b are optional for Z output - ONNXRUNTIME_ENFORCE(proba_.size() == probb_.size()); + ORT_ENFORCE(proba_.size() == probb_.size()); // one of these should be valid - ONNXRUNTIME_ENFORCE(info.GetAttrs("classlabels_strings", classlabels_strings_).IsOK() || + ORT_ENFORCE(info.GetAttrs("classlabels_strings", classlabels_strings_).IsOK() || info.GetAttrs("classlabels_ints", classlabels_ints_).IsOK()); vector_count_ = 0; @@ -63,9 +63,9 @@ SVMClassifier::SVMClassifier(const OpKernelInfo& info) mode_ = SVM_TYPE::SVM_LINEAR; set_kernel_type(KERNEL::LINEAR); } - ONNXRUNTIME_ENFORCE(classlabels_strings_.size() > 0 || classlabels_ints_.size() > 0); - ONNXRUNTIME_ENFORCE(proba_.size() == probb_.size()); - ONNXRUNTIME_ENFORCE(coefficients_.size() > 0); + ORT_ENFORCE(classlabels_strings_.size() > 0 || classlabels_ints_.size() > 0); + ORT_ENFORCE(proba_.size() == probb_.size()); + ORT_ENFORCE(coefficients_.size() > 0); weights_are_all_positive_ = true; for (int64_t i = 0; i < static_cast(coefficients_.size()); i++) { if (coefficients_[i] < 0) { diff --git a/onnxruntime/core/providers/cpu/ml/svmclassifier.h b/onnxruntime/core/providers/cpu/ml/svmclassifier.h index f51a6be5d75f0..5490a44fbb333 100644 --- a/onnxruntime/core/providers/cpu/ml/svmclassifier.h +++ b/onnxruntime/core/providers/cpu/ml/svmclassifier.h @@ -17,7 +17,7 @@ class SVMCommon { protected: SVMCommon(const OpKernelInfo& info) : kernel_type_(MakeKernel(info.GetAttrOrDefault("kernel_type", "LINEAR"))) { std::vector kernel_params; - ONNXRUNTIME_ENFORCE(info.GetAttrs("kernel_params", kernel_params).IsOK()); + ORT_ENFORCE(info.GetAttrs("kernel_params", kernel_params).IsOK()); if (kernel_params.size() > 0) { gamma_ = kernel_params[0]; diff --git a/onnxruntime/core/providers/cpu/ml/svmregressor.cc b/onnxruntime/core/providers/cpu/ml/svmregressor.cc index 2a3c47c5e3134..ba1ace0b09b2e 100644 --- a/onnxruntime/core/providers/cpu/ml/svmregressor.cc +++ b/onnxruntime/core/providers/cpu/ml/svmregressor.cc @@ -19,9 +19,9 @@ SVMRegressor::SVMRegressor(const OpKernelInfo& info) vector_count_(info.GetAttrOrDefault("n_supports", 0)), support_vectors_(info.GetAttrsOrDefault("support_vectors")), post_transform_(MakeTransform(info.GetAttrOrDefault("post_transform", "NONE"))) { - ONNXRUNTIME_ENFORCE(info.GetAttrs("rho", rho_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("coefficients", coefficients_).IsOK()); - ONNXRUNTIME_ENFORCE(coefficients_.size() > 0); + ORT_ENFORCE(info.GetAttrs("rho", rho_).IsOK()); + ORT_ENFORCE(info.GetAttrs("coefficients", coefficients_).IsOK()); + ORT_ENFORCE(coefficients_.size() > 0); int64_t onec = info.GetAttrOrDefault("one_class", 0); one_class_ 
= (onec != 0); diff --git a/onnxruntime/core/providers/cpu/ml/tree_ensemble_classifier.cc b/onnxruntime/core/providers/cpu/ml/tree_ensemble_classifier.cc index cc26bdcc8d1b5..e756b172b6129 100644 --- a/onnxruntime/core/providers/cpu/ml/tree_ensemble_classifier.cc +++ b/onnxruntime/core/providers/cpu/ml/tree_ensemble_classifier.cc @@ -155,22 +155,22 @@ TreeEnsembleClassifier::TreeEnsembleClassifier(const OpKernelInfo& info) classlabels_strings_(info.GetAttrsOrDefault("classlabels_strings")), classlabels_int64s_(info.GetAttrsOrDefault("classlabels_int64s")), post_transform_(MakeTransform(info.GetAttrOrDefault("post_transform", "NONE"))) { - ONNXRUNTIME_ENFORCE(!nodes_treeids_.empty()); - ONNXRUNTIME_ENFORCE(class_nodeids_.size() == class_ids_.size()); - ONNXRUNTIME_ENFORCE(class_nodeids_.size() == class_weights_.size()); - ONNXRUNTIME_ENFORCE(nodes_nodeids_.size() == nodes_featureids_.size()); - ONNXRUNTIME_ENFORCE(nodes_nodeids_.size() == nodes_modes_names_.size()); - ONNXRUNTIME_ENFORCE(nodes_nodeids_.size() == nodes_values_.size()); - ONNXRUNTIME_ENFORCE(nodes_nodeids_.size() == nodes_truenodeids_.size()); - ONNXRUNTIME_ENFORCE(nodes_nodeids_.size() == nodes_falsenodeids_.size()); - ONNXRUNTIME_ENFORCE((nodes_nodeids_.size() == nodes_hitrates_.size()) || (nodes_hitrates_.empty())); + ORT_ENFORCE(!nodes_treeids_.empty()); + ORT_ENFORCE(class_nodeids_.size() == class_ids_.size()); + ORT_ENFORCE(class_nodeids_.size() == class_weights_.size()); + ORT_ENFORCE(nodes_nodeids_.size() == nodes_featureids_.size()); + ORT_ENFORCE(nodes_nodeids_.size() == nodes_modes_names_.size()); + ORT_ENFORCE(nodes_nodeids_.size() == nodes_values_.size()); + ORT_ENFORCE(nodes_nodeids_.size() == nodes_truenodeids_.size()); + ORT_ENFORCE(nodes_nodeids_.size() == nodes_falsenodeids_.size()); + ORT_ENFORCE((nodes_nodeids_.size() == nodes_hitrates_.size()) || (nodes_hitrates_.empty())); - ONNXRUNTIME_ENFORCE(classlabels_strings_.empty() ^ classlabels_int64s_.empty(), + ORT_ENFORCE(classlabels_strings_.empty() ^ classlabels_int64s_.empty(), "Must provide classlabels_strings or classlabels_int64s but not both."); // in the absence of bool type supported by GetAttrs this ensure that we don't have any negative // values so that we can check for the truth condition without worrying about negative values. - ONNXRUNTIME_ENFORCE(std::all_of( + ORT_ENFORCE(std::all_of( std::begin(missing_tracks_true_), std::end(missing_tracks_true_), [](int64_t elem) { return elem >= 0; })); @@ -266,7 +266,7 @@ void TreeEnsembleClassifier::Initialize() { // they must be in the same tree int64_t id = nodes_treeids_[i] * kOffset_ + nodes_truenodeids_[i]; it = parents.find(id); - ONNXRUNTIME_ENFORCE(it != parents.end()); + ORT_ENFORCE(it != parents.end()); it->second++; } // all false nodes arent roots_ @@ -275,7 +275,7 @@ void TreeEnsembleClassifier::Initialize() { // they must be in the same tree int64_t id = nodes_treeids_[i] * kOffset_ + nodes_falsenodeids_[i]; it = parents.find(id); - ONNXRUNTIME_ENFORCE(it != parents.end()); + ORT_ENFORCE(it != parents.end()); it->second++; } // find all the nodes that dont have other nodes pointing at them @@ -288,7 +288,7 @@ void TreeEnsembleClassifier::Initialize() { } class_count_ = !classlabels_strings_.empty() ? 
classlabels_strings_.size() : classlabels_int64s_.size(); using_strings_ = !classlabels_strings_.empty(); - ONNXRUNTIME_ENFORCE(base_values_.empty() || + ORT_ENFORCE(base_values_.empty() || base_values_.size() == static_cast(class_count_) || base_values_.size() == weights_classes_.size()); } @@ -324,7 +324,7 @@ common::Status TreeEnsembleClassifier::Compute(OpKernelContext* context) cons } // walk each tree from its root for (size_t j = 0, end = roots_.size(); j < end; ++j) { - ONNXRUNTIME_RETURN_IF_ERROR(ProcessTreeNode(classes, roots_[j], x_data, current_weight_0)); + ORT_RETURN_IF_ERROR(ProcessTreeNode(classes, roots_[j], x_data, current_weight_0)); } float maxweight = 0.f; int64_t maxclass = -1; @@ -473,7 +473,7 @@ common::Status TreeEnsembleClassifier::ProcessTreeNode(std::map= 0); + ORT_ENFORCE(treeindex >= 0); treeindex = treeindex + root; mode = static_cast(nodes_modes_[treeindex]); loopcount++; diff --git a/onnxruntime/core/providers/cpu/ml/treeregressor.cc b/onnxruntime/core/providers/cpu/ml/treeregressor.cc index 4fecec46d76a6..d2d88e4acf630 100644 --- a/onnxruntime/core/providers/cpu/ml/treeregressor.cc +++ b/onnxruntime/core/providers/cpu/ml/treeregressor.cc @@ -30,10 +30,10 @@ TreeEnsembleRegressor::TreeEnsembleRegressor(const OpKernelInfo& info) base_values_(info.GetAttrsOrDefault("base_values")), transform_(::onnxruntime::ml::MakeTransform(info.GetAttrOrDefault("post_transform", "NONE"))), aggregate_function_(::onnxruntime::ml::MakeAggregateFunction(info.GetAttrOrDefault("aggregate_function", "SUM"))) { - ONNXRUNTIME_ENFORCE(info.GetAttr("n_targets", &n_targets_).IsOK()); + ORT_ENFORCE(info.GetAttr("n_targets", &n_targets_).IsOK()); //update nodeids to start at 0 - ONNXRUNTIME_ENFORCE(!nodes_treeids_.empty()); + ORT_ENFORCE(!nodes_treeids_.empty()); int64_t current_tree_id = 1234567891L; std::vector tree_offsets; @@ -63,14 +63,14 @@ TreeEnsembleRegressor::TreeEnsembleRegressor(const OpKernelInfo& info) } size_t nodes_id_size = nodes_nodeids_.size(); - ONNXRUNTIME_ENFORCE(target_nodeids_.size() == target_ids_.size()); - ONNXRUNTIME_ENFORCE(target_nodeids_.size() == target_weights_.size()); - ONNXRUNTIME_ENFORCE(nodes_id_size == nodes_featureids_.size()); - ONNXRUNTIME_ENFORCE(nodes_id_size == nodes_values_.size()); - ONNXRUNTIME_ENFORCE(nodes_id_size == nodes_modes_.size()); - ONNXRUNTIME_ENFORCE(nodes_id_size == nodes_truenodeids_.size()); - ONNXRUNTIME_ENFORCE(nodes_id_size == nodes_falsenodeids_.size()); - ONNXRUNTIME_ENFORCE((nodes_id_size == nodes_hitrates_.size()) || (0 == nodes_hitrates_.size())); + ORT_ENFORCE(target_nodeids_.size() == target_ids_.size()); + ORT_ENFORCE(target_nodeids_.size() == target_weights_.size()); + ORT_ENFORCE(nodes_id_size == nodes_featureids_.size()); + ORT_ENFORCE(nodes_id_size == nodes_values_.size()); + ORT_ENFORCE(nodes_id_size == nodes_modes_.size()); + ORT_ENFORCE(nodes_id_size == nodes_truenodeids_.size()); + ORT_ENFORCE(nodes_id_size == nodes_falsenodeids_.size()); + ORT_ENFORCE((nodes_id_size == nodes_hitrates_.size()) || (0 == nodes_hitrates_.size())); max_tree_depth_ = 1000; offset_ = four_billion_; @@ -122,7 +122,7 @@ TreeEnsembleRegressor::TreeEnsembleRegressor(const OpKernelInfo& info) //they must be in the same tree int64_t id = nodes_treeids_[i] * offset_ + nodes_truenodeids_[i]; it = parents.find(id); - ONNXRUNTIME_ENFORCE(it != parents.end()); + ORT_ENFORCE(it != parents.end()); it->second++; } //all false nodes aren't roots @@ -131,7 +131,7 @@ TreeEnsembleRegressor::TreeEnsembleRegressor(const OpKernelInfo& info) //they 
must be in the same tree int64_t id = nodes_treeids_[i] * offset_ + nodes_falsenodeids_[i]; it = parents.find(id); - ONNXRUNTIME_ENFORCE(it != parents.end()); + ORT_ENFORCE(it != parents.end()); it->second++; } //find all the nodes that dont have other nodes pointing at them @@ -139,11 +139,11 @@ TreeEnsembleRegressor::TreeEnsembleRegressor(const OpKernelInfo& info) if (parent.second == 0) { int64_t id = parent.first; it = indices.find(id); - ONNXRUNTIME_ENFORCE(it != indices.end()); + ORT_ENFORCE(it != indices.end()); roots_.push_back(it->second); } } - ONNXRUNTIME_ENFORCE(base_values_.empty() || base_values_.size() == static_cast(n_targets_)); + ORT_ENFORCE(base_values_.empty() || base_values_.size() == static_cast(n_targets_)); } template @@ -236,7 +236,7 @@ common::Status TreeEnsembleRegressor::Compute(OpKernelContext* context) const //for each tree for (size_t j = 0; j < roots_.size(); j++) { //walk each tree from its root - ONNXRUNTIME_RETURN_IF_ERROR(ProcessTreeNode(scores, roots_[j], x_data, current_weight_0)); + ORT_RETURN_IF_ERROR(ProcessTreeNode(scores, roots_[j], x_data, current_weight_0)); } //find aggregate, could use a heap here if there are many classes std::vector outputs; diff --git a/onnxruntime/core/providers/cpu/ml/zipmap.cc b/onnxruntime/core/providers/cpu/ml/zipmap.cc index db4e52488642c..4a1080d347ffc 100644 --- a/onnxruntime/core/providers/cpu/ml/zipmap.cc +++ b/onnxruntime/core/providers/cpu/ml/zipmap.cc @@ -39,7 +39,7 @@ ZipMapOp::ZipMapOp(const OpKernelInfo& info) : OpKernel(info), classlabels_int64s_(info.GetAttrsOrDefault("classlabels_int64s")), classlabels_strings_(info.GetAttrsOrDefault("classlabels_strings")) { - ONNXRUNTIME_ENFORCE(classlabels_strings_.empty() ^ classlabels_int64s_.empty(), + ORT_ENFORCE(classlabels_strings_.empty() ^ classlabels_int64s_.empty(), "Must provide classlabels_strings or classlabels_int64s but not both."); using_strings_ = !classlabels_strings_.empty(); } diff --git a/onnxruntime/core/providers/cpu/nn/autopad_type.h b/onnxruntime/core/providers/cpu/nn/autopad_type.h index a98a88ff5da70..bc0d65221731e 100644 --- a/onnxruntime/core/providers/cpu/nn/autopad_type.h +++ b/onnxruntime/core/providers/cpu/nn/autopad_type.h @@ -27,7 +27,7 @@ inline AutoPadType StringToAutoPadType(const std::string& str) { } else if (str == "SAME_LOWER") { return AutoPadType::SAME_LOWER; } else { - ONNXRUNTIME_ENFORCE(false, "Unknown AutoPadType String"); + ORT_ENFORCE(false, "Unknown AutoPadType String"); } } } // namespace onnxruntime diff --git a/onnxruntime/core/providers/cpu/nn/batch_norm.cc b/onnxruntime/core/providers/cpu/nn/batch_norm.cc index c0fd62944e120..22d84112baf01 100644 --- a/onnxruntime/core/providers/cpu/nn/batch_norm.cc +++ b/onnxruntime/core/providers/cpu/nn/batch_norm.cc @@ -34,7 +34,7 @@ Status BatchNorm::Compute(OpKernelContext* p_op_kernel_context) const { const Tensor* mean = p_op_kernel_context->Input(3); const Tensor* var = p_op_kernel_context->Input(4); - ONNXRUNTIME_RETURN_IF_ERROR(BatchNormHelper::ValidateInputs(X, scale, B, mean, var)); + ORT_RETURN_IF_ERROR(BatchNormHelper::ValidateInputs(X, scale, B, mean, var)); const TensorShape& x_shape = X->Shape(); Tensor* Y = p_op_kernel_context->Output(0, x_shape); diff --git a/onnxruntime/core/providers/cpu/nn/batch_norm_helper.h b/onnxruntime/core/providers/cpu/nn/batch_norm_helper.h index d6f7861bccfae..6161dcff5c248 100644 --- a/onnxruntime/core/providers/cpu/nn/batch_norm_helper.h +++ b/onnxruntime/core/providers/cpu/nn/batch_norm_helper.h @@ -30,31 +30,31 @@ class 
BatchNormHelper { int64_t num_channels = X->Shape().GetDims()[1]; if (scale->Shape().NumDimensions() != kNumInputScaleDimensions) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input scale: NumDimensions() != ", kNumInputScaleDimensions); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input scale: NumDimensions() != ", kNumInputScaleDimensions); } if (scale->Shape().GetDims()[0] != num_channels) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input scale: 0th dimension != ", num_channels); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input scale: 0th dimension != ", num_channels); } if (B->Shape().NumDimensions() != kNumInputBiasDimensions) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input B: NumDimensions() != ", kNumInputBiasDimensions); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input B: NumDimensions() != ", kNumInputBiasDimensions); } if (B->Shape().GetDims()[0] != num_channels) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input B: 0th dimension != ", num_channels); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input B: 0th dimension != ", num_channels); } if (mean->Shape().NumDimensions() != kNumInputMeanDimensions) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input mean: NumDimensions() != ", kNumInputMeanDimensions); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input mean: NumDimensions() != ", kNumInputMeanDimensions); } if (mean->Shape().GetDims()[0] != num_channels) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input mean: 0th dimension != ", num_channels); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input mean: 0th dimension != ", num_channels); } if (var->Shape().NumDimensions() != kNumInputVarianceDimensions) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input var: NumDimensions() != ", kNumInputVarianceDimensions); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input var: NumDimensions() != ", kNumInputVarianceDimensions); } if (var->Shape().GetDims()[0] != num_channels) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input var: 0th dimension != ", num_channels); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Invalid input var: 0th dimension != ", num_channels); } return common::Status::OK(); diff --git a/onnxruntime/core/providers/cpu/nn/conv.cc b/onnxruntime/core/providers/cpu/nn/conv.cc index 26ac4a90c0a0f..d633aa8fe1b83 100644 --- a/onnxruntime/core/providers/cpu/nn/conv.cc +++ b/onnxruntime/core/providers/cpu/nn/conv.cc @@ -14,19 +14,19 @@ Status Conv::Compute(OpKernelContext* context) const { const int64_t N = X->Shape()[0]; const int64_t C = X->Shape()[1]; const int64_t M = W->Shape()[0]; - ONNXRUNTIME_RETURN_IF_ERROR(ValidateInputShape(X, W)); + ORT_RETURN_IF_ERROR(ValidateInputShape(X, W)); std::vector kernel_shape = ComputeKernelShape(W->Shape()); if (kernel_shape.size() + 2 != W->Shape().NumDimensions()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape num_dims is not compatible with W num_dims.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape num_dims is not compatible with W num_dims.", " kernel_shape: ", TensorShape(kernel_shape).ToString().c_str(), " W: ", W->Shape().ToString().c_str()); } for (size_t i = 0; i < kernel_shape.size(); ++i) { if (kernel_shape[i] 
!= W->Shape()[i + 2]) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape is not compatible with W shape.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape is not compatible with W shape.", " kernel_shape: ", TensorShape(kernel_shape).ToString().c_str(), " W: ", W->Shape().ToString().c_str()); } @@ -48,12 +48,12 @@ Status Conv::Compute(OpKernelContext* context) const { std::vector Y_dims; Y_dims.insert(Y_dims.begin(), {N, M}); TensorShape input_shape = X->Shape().Slice(2); - ONNXRUNTIME_RETURN_IF_ERROR(InferOutputShape(input_shape, kernel_shape, strides, dilations, &pads, &Y_dims)); + ORT_RETURN_IF_ERROR(InferOutputShape(input_shape, kernel_shape, strides, dilations, &pads, &Y_dims)); Tensor* Y = context->Output(0, TensorShape(Y_dims)); TensorShape output_shape = Y->Shape().Slice(2); AllocatorPtr alloc; - ONNXRUNTIME_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); const float* Xdata = X->template Data(); float* Ydata = Y->template MutableData(); diff --git a/onnxruntime/core/providers/cpu/nn/conv_base.h b/onnxruntime/core/providers/cpu/nn/conv_base.h index e80acb245da64..1af951a894c85 100644 --- a/onnxruntime/core/providers/cpu/nn/conv_base.h +++ b/onnxruntime/core/providers/cpu/nn/conv_base.h @@ -35,7 +35,7 @@ Status ComputePadAndOutputShape( break; case AutoPadType::SAME_UPPER: case AutoPadType::SAME_LOWER: { - ONNXRUNTIME_ENFORCE(dilation == 1, "Dilation not supported for AutoPadType::SAME_UPPER or AutoPadType::SAME_LOWER."); + ORT_ENFORCE(dilation == 1, "Dilation not supported for AutoPadType::SAME_UPPER or AutoPadType::SAME_LOWER."); int64_t legacy_target_size = (in_dim + stride - 1) / stride; int64_t pad_needed = (legacy_target_size - 1) * stride + kernel - in_dim; *out_dim = (in_dim + pad_needed - dkernel) / stride + 1; @@ -91,13 +91,13 @@ class ConvBase { #if false // TODO: Re-enable when attributes values are guaranteed to be filled. 
std::string auto_pad; - ONNXRUNTIME_ENFORCE(info.GetAttr("auto_pad", &auto_pad).IsOK()); + ORT_ENFORCE(info.GetAttr("auto_pad", &auto_pad).IsOK()); auto_pad_ = StringToAutoPadType(auto_pad); - ONNXRUNTIME_ENFORCE(info.GetAttr("group", &group_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("kernel_shape", kernel_shape_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("strides", strides_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("pads", pads_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("dilations", dilations_).IsOK()); + ORT_ENFORCE(info.GetAttr("group", &group_).IsOK()); + ORT_ENFORCE(info.GetAttrs("kernel_shape", kernel_shape_).IsOK()); + ORT_ENFORCE(info.GetAttrs("strides", strides_).IsOK()); + ORT_ENFORCE(info.GetAttrs("pads", pads_).IsOK()); + ORT_ENFORCE(info.GetAttrs("dilations", dilations_).IsOK()); #endif } @@ -119,20 +119,20 @@ class ConvBase { const int64_t M = W->Shape()[0]; if (X->Shape().NumDimensions() != W->Shape().NumDimensions()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "X num_dims does not match W num_dims.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "X num_dims does not match W num_dims.", " X: ", X->Shape().ToString().c_str(), " W: ", W->Shape().ToString().c_str()); } if (C != W->Shape()[1] * group_) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input channels C is not equal to kernel channels * group.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input channels C is not equal to kernel channels * group.", " C: ", C, " kernel channels: ", W->Shape()[1], " group: ", group_); } if (M % group_ != 0) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Output channels M is not divisible by group.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Output channels M is not divisible by group.", " M: ", M, " group: ", group_); } @@ -151,10 +151,10 @@ class ConvBase { if (dim >= strides.size() || dim >= kernel_shape.size() || dim >= dilations.size() || dim >= pads->size() || rank + dim >= pads->size()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Out of bound access to array"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Out of bound access to array"); } int64_t dim_size = 0; - ONNXRUNTIME_RETURN_IF_ERROR(ComputePadAndOutputShape( + ORT_RETURN_IF_ERROR(ComputePadAndOutputShape( input_shape[dim], strides[dim], kernel_shape[dim], diff --git a/onnxruntime/core/providers/cpu/nn/conv_impl.h b/onnxruntime/core/providers/cpu/nn/conv_impl.h index 40038f9cb120d..c22e9a6e7dd7b 100644 --- a/onnxruntime/core/providers/cpu/nn/conv_impl.h +++ b/onnxruntime/core/providers/cpu/nn/conv_impl.h @@ -37,7 +37,7 @@ void fuse_activation(const std::string& activation, T* y_data, size_t size, floa } else if (activation == "LeakyRelu") { y_vec = (y_vec >= 0).select(y_vec, (T)alpha * y_vec); } else { - ONNXRUNTIME_NOT_IMPLEMENTED("Not implemented fused activation: ", activation); + ORT_NOT_IMPLEMENTED("Not implemented fused activation: ", activation); } } @@ -51,19 +51,19 @@ Status Conv::Compute(OpKernelContext* context) const { const int64_t N = X->Shape()[0]; const int64_t C = X->Shape()[1]; const int64_t M = W->Shape()[0]; - ONNXRUNTIME_RETURN_IF_ERROR(ValidateInputShape(X, W)); + ORT_RETURN_IF_ERROR(ValidateInputShape(X, W)); std::vector kernel_shape = ComputeKernelShape(W->Shape()); if (kernel_shape.size() + 2 != W->Shape().NumDimensions()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape num_dims is not compatible with W num_dims.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape num_dims is not compatible with W num_dims.", " 
kernel_shape: ", TensorShape(kernel_shape).ToString().c_str(), " W: ", W->Shape().ToString().c_str()); } for (size_t i = 0; i < kernel_shape.size(); ++i) { if (kernel_shape[i] != W->Shape()[i + 2]) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape is not compatible with W shape.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape is not compatible with W shape.", " kernel_shape: ", TensorShape(kernel_shape).ToString().c_str(), " W: ", W->Shape().ToString().c_str()); } @@ -86,7 +86,7 @@ Status Conv::Compute(OpKernelContext* context) const { std::vector Y_dims; Y_dims.insert(Y_dims.begin(), {N, M}); TensorShape input_shape = X->Shape().Slice(2); - ONNXRUNTIME_RETURN_IF_ERROR(InferOutputShape(input_shape, kernel_shape, strides, dilations, &pads, &Y_dims)); + ORT_RETURN_IF_ERROR(InferOutputShape(input_shape, kernel_shape, strides, dilations, &pads, &Y_dims)); Tensor* Y = context->Output(0, TensorShape(Y_dims)); TensorShape output_shape = Y->Shape().Slice(2); @@ -100,7 +100,7 @@ Status Conv::Compute(OpKernelContext* context) const { const int64_t col_buffer_size = kernel_dim * output_image_size; AllocatorPtr alloc; - ONNXRUNTIME_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); auto col_data = alloc->Alloc(sizeof(T) * col_buffer_size); BufferUniquePtr col_buffer(col_data, BufferDeleter(alloc)); diff --git a/onnxruntime/core/providers/cpu/nn/conv_transpose.cc b/onnxruntime/core/providers/cpu/nn/conv_transpose.cc index 939269d190238..62fccf0f7a1cb 100644 --- a/onnxruntime/core/providers/cpu/nn/conv_transpose.cc +++ b/onnxruntime/core/providers/cpu/nn/conv_transpose.cc @@ -37,7 +37,7 @@ inline void ComputeTransposePadAndOutputShape( int64_t* pad_tail, int64_t* out_size) { if (*out_size != -1) { - ONNXRUNTIME_ENFORCE(*out_size >= 0); + ORT_ENFORCE(*out_size >= 0); // total padding size int64_t paddings = std::max(0, (in_size - 1) * stride + kernel + adj - *out_size); if (pad_type == AutoPadType::SAME_UPPER) { // pad more on head when paddings are odd. @@ -81,7 +81,7 @@ Status ConvTransposeBase::PrepareForCompute(OpKernelContext* context, bool has_b // input validations if (group_ <= 0) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "group count is <= 0", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "group count is <= 0", " group: ", group_); } @@ -89,12 +89,12 @@ Status ConvTransposeBase::PrepareForCompute(OpKernelContext* context, bool has_b // This condition is not true for two tests in ONNX tests series: // test_convtranspose_1d_cpu, test_convtranspose_3d_cpu. // TODO: the error message should tell which operator raises it. 
- return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input X must be 4-dimensional.", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input X must be 4-dimensional.", " X: ", X->Shape().ToString().c_str()); } if (input_shape.NumDimensions() != F->Shape().NumDimensions()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "X num_dims does not match W num_dims.", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "X num_dims does not match W num_dims.", " X: ", X->Shape().ToString().c_str(), " W: ", F->Shape().ToString().c_str()); } @@ -102,7 +102,7 @@ Status ConvTransposeBase::PrepareForCompute(OpKernelContext* context, bool has_b const int64_t num_input_channels = input_shape[1]; if (F->Shape()[0] != num_input_channels) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "filter number not equal to input channel number.", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "filter number not equal to input channel number.", " filter_number: ", F->Shape()[0], " num_input_channels: ", num_input_channels); } @@ -117,7 +117,7 @@ Status ConvTransposeBase::PrepareForCompute(OpKernelContext* context, bool has_b // num_input_channels is k*group_. hence removing the check for num_output_channels here. if (num_input_channels % group_ != 0) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input channels is not divisible by group.", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input channels is not divisible by group.", " num_input_channels: ", num_input_channels, " group: ", group_); } @@ -125,13 +125,13 @@ Status ConvTransposeBase::PrepareForCompute(OpKernelContext* context, bool has_b std::vector kernel_shape = ComputeKernelShape(F->Shape()); if (kernel_shape[0] != F->Shape()[2]) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "kernel height does not match filter height.", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "kernel height does not match filter height.", " kernel_height: ", kernel_shape[0], " filter_height: ", F->Shape()[2]); } if (kernel_shape[1] != F->Shape()[3]) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "kernel width does not match filter width.", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "kernel width does not match filter width.", " kernel_width: ", kernel_shape[1], " filter_width: ", F->Shape()[3]); } @@ -192,8 +192,8 @@ void ConvTransposeBase::ComputePadsAndOutputShape( if (output_shape_size != 0) { output_height = output_shape_[output_shape_size - 2]; output_width = output_shape_[output_shape_size - 1]; - ONNXRUNTIME_ENFORCE(output_height >= H, "Output height cannot be smaller than input height."); - ONNXRUNTIME_ENFORCE(output_width >= W, "Output width cannot be smaller than input width."); + ORT_ENFORCE(output_height >= H, "Output height cannot be smaller than input height."); + ORT_ENFORCE(output_width >= W, "Output width cannot be smaller than input width."); } ComputeTransposePadAndOutputShape( @@ -223,7 +223,7 @@ template Status ConvTranspose::Compute(OpKernelContext* context) const { size_t num_inputs = OpKernel::Node().InputDefs().size(); Prepare p; - ONNXRUNTIME_RETURN_IF_ERROR(PrepareForCompute(context, num_inputs == 3, p)); + ORT_RETURN_IF_ERROR(PrepareForCompute(context, num_inputs == 3, p)); const int64_t input_image_size = p.H * p.W; const int64_t X_offset = p.num_input_channels / group_ * input_image_size; @@ -233,7 +233,7 @@ Status ConvTranspose::Compute(OpKernelContext* context) const { const int64_t 
output_image_size = p.Y->Shape()[2] * p.Y->Shape()[3]; AllocatorPtr alloc; - ONNXRUNTIME_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); auto col_data = alloc->Alloc(sizeof(T) * kernel_dim * p.H * p.W); BufferUniquePtr col_buffer(col_data, BufferDeleter(alloc)); diff --git a/onnxruntime/core/providers/cpu/nn/flatten.h b/onnxruntime/core/providers/cpu/nn/flatten.h index a1c8881f35328..3af8f074aa2bf 100644 --- a/onnxruntime/core/providers/cpu/nn/flatten.h +++ b/onnxruntime/core/providers/cpu/nn/flatten.h @@ -13,7 +13,7 @@ namespace onnxruntime { class Flatten final : public OpKernel { public: Flatten(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("axis", &axis_).IsOK()); + ORT_ENFORCE(info.GetAttr("axis", &axis_).IsOK()); } Status Compute(OpKernelContext* context) const override { @@ -21,7 +21,7 @@ class Flatten final : public OpKernel { if (X == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch"); const TensorShape& X_shape = X->Shape(); - ONNXRUNTIME_ENFORCE(gsl::narrow_cast(X_shape.NumDimensions()) >= axis_, "The rank of input tensor must be >= axis"); + ORT_ENFORCE(gsl::narrow_cast(X_shape.NumDimensions()) >= axis_, "The rank of input tensor must be >= axis"); Tensor* Y = context->Output(0, TensorShape({X_shape.SizeToDimension(axis_), X_shape.SizeFromDimension(axis_)})); diff --git a/onnxruntime/core/providers/cpu/nn/instance_norm.cc b/onnxruntime/core/providers/cpu/nn/instance_norm.cc index 3903b76dffc84..72040f1f168ac 100644 --- a/onnxruntime/core/providers/cpu/nn/instance_norm.cc +++ b/onnxruntime/core/providers/cpu/nn/instance_norm.cc @@ -20,7 +20,7 @@ Status InstanceNorm::Compute(OpKernelContext* p_op_kernel_context) const const Tensor* scale = p_op_kernel_context->Input(1); const Tensor* B = p_op_kernel_context->Input(2); - ONNXRUNTIME_RETURN_IF_ERROR(InstanceNormHelper::ValidateInputs(input, scale, B)); + ORT_RETURN_IF_ERROR(InstanceNormHelper::ValidateInputs(input, scale, B)); const int64_t N = input->Shape().GetDims()[0]; const int64_t C = input->Shape().GetDims()[1]; const int64_t W = input->Shape().SizeFromDimension(2); diff --git a/onnxruntime/core/providers/cpu/nn/instance_norm.h b/onnxruntime/core/providers/cpu/nn/instance_norm.h index 0a6f35e07f08f..1c1ab35e04156 100644 --- a/onnxruntime/core/providers/cpu/nn/instance_norm.h +++ b/onnxruntime/core/providers/cpu/nn/instance_norm.h @@ -11,7 +11,7 @@ template class InstanceNorm final : public OpKernel { public: InstanceNorm(const OpKernelInfo& op_kernel_info) : OpKernel(op_kernel_info) { - ONNXRUNTIME_ENFORCE(op_kernel_info.GetAttr("epsilon", &epsilon_).IsOK()); + ORT_ENFORCE(op_kernel_info.GetAttr("epsilon", &epsilon_).IsOK()); } Status Compute(OpKernelContext* p_op_kernel_context) const override; diff --git a/onnxruntime/core/providers/cpu/nn/lp_norm.h b/onnxruntime/core/providers/cpu/nn/lp_norm.h index 932776ec2a19f..23fd8d7f3f29b 100644 --- a/onnxruntime/core/providers/cpu/nn/lp_norm.h +++ b/onnxruntime/core/providers/cpu/nn/lp_norm.h @@ -25,9 +25,9 @@ template class LpNorm final : public OpKernel { public: LpNorm(const OpKernelInfo& op_kernel_info) : OpKernel(op_kernel_info) { - ONNXRUNTIME_ENFORCE(op_kernel_info.GetAttr("axis", &axis_).IsOK()); - ONNXRUNTIME_ENFORCE(op_kernel_info.GetAttr("p", &p_).IsOK()); - ONNXRUNTIME_ENFORCE(p_ == 1 || p_ == 2); + ORT_ENFORCE(op_kernel_info.GetAttr("axis", &axis_).IsOK()); + ORT_ENFORCE(op_kernel_info.GetAttr("p", &p_).IsOK()); + ORT_ENFORCE(p_ == 1 
|| p_ == 2); } Status Compute(OpKernelContext* p_op_kernel_context) const override; diff --git a/onnxruntime/core/providers/cpu/nn/lrn.cc b/onnxruntime/core/providers/cpu/nn/lrn.cc index eb1978b16c898..b48dbf69dc5be 100644 --- a/onnxruntime/core/providers/cpu/nn/lrn.cc +++ b/onnxruntime/core/providers/cpu/nn/lrn.cc @@ -29,7 +29,7 @@ Status LRN::Compute(OpKernelContext* context) const { Tensor* Y = context->Output(0, X->Shape()); // Supports NCHW image format. - ONNXRUNTIME_ENFORCE(X->Shape().NumDimensions() == 4); + ORT_ENFORCE(X->Shape().NumDimensions() == 4); const int N = gsl::narrow_cast(X->Shape()[0]); const int C = gsl::narrow_cast(X->Shape()[1]); const int H = gsl::narrow_cast(X->Shape()[2]); @@ -41,7 +41,7 @@ Status LRN::Compute(OpKernelContext* context) const { float* Ydata = Y->template MutableData(); AllocatorPtr alloc; - ONNXRUNTIME_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); const int Xsize = gsl::narrow_cast(X->Shape().Size()); auto sdata = alloc->Alloc(sizeof(float) * Xsize); diff --git a/onnxruntime/core/providers/cpu/nn/lrn.h b/onnxruntime/core/providers/cpu/nn/lrn.h index 031189cf613b8..9e3f913b22b4c 100644 --- a/onnxruntime/core/providers/cpu/nn/lrn.h +++ b/onnxruntime/core/providers/cpu/nn/lrn.h @@ -16,14 +16,14 @@ class LRN : public OpKernel { public: LRN(const OpKernelInfo& info) : OpKernel(info) { int64_t size; - ONNXRUNTIME_ENFORCE(info.GetAttr("size", &size).IsOK()); + ORT_ENFORCE(info.GetAttr("size", &size).IsOK()); size_ = gsl::narrow_cast(size); - ONNXRUNTIME_ENFORCE(size_ > 0); - ONNXRUNTIME_ENFORCE(size_ % 2 == 1); - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(alpha_ > 0.0f); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); - ONNXRUNTIME_ENFORCE(beta_ > 0.0f); + ORT_ENFORCE(size_ > 0); + ORT_ENFORCE(size_ % 2 == 1); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(alpha_ > 0.0f); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(beta_ > 0.0f); Status status = info.GetAttr("bias", &bias_); if (!status.IsOK()) { bias_ = 1.0f; diff --git a/onnxruntime/core/providers/cpu/nn/pool.cc b/onnxruntime/core/providers/cpu/nn/pool.cc index a76322ef31670..1725bbb9cdd52 100644 --- a/onnxruntime/core/providers/cpu/nn/pool.cc +++ b/onnxruntime/core/providers/cpu/nn/pool.cc @@ -12,7 +12,7 @@ Status Pool::Compute(OpKernelContext* context) const { const Tensor* X = context->Input(0); const TensorShape& x_shape = X->Shape(); - ONNXRUNTIME_RETURN_IF_NOT(x_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3."); + ORT_RETURN_IF_NOT(x_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3."); std::vector pads = pads_; std::vector kernel_shape = kernel_shape_; @@ -166,14 +166,14 @@ Status PoolBase::Compute(OpKernelContext* context, MLAS_POOLING_KIND kind) const const TensorShape& x_shape = X->Shape(); size_t input_dims = x_shape.NumDimensions(); - ONNXRUNTIME_RETURN_IF_NOT(input_dims >= 3, "Input dimension cannot be less than 3."); + ORT_RETURN_IF_NOT(input_dims >= 3, "Input dimension cannot be less than 3."); size_t pooling_dims = input_dims - 2; if (pooling_dims > 3) { return Status(ONNXRUNTIME, INVALID_ARGUMENT, "Unsupported pooling size."); } if (!global_pooling_) { - ONNXRUNTIME_RETURN_IF_NOT(pooling_dims == kernel_shape_.size(), "kernel_shape num_dims is not compatible with X num_dims."); + ORT_RETURN_IF_NOT(pooling_dims == kernel_shape_.size(), "kernel_shape num_dims is not compatible with X 
num_dims."); } std::vector pads = pads_; @@ -213,7 +213,7 @@ Status Pool>::Compute(OpKernelContext* context) co const Tensor* X = context->Input(0); const TensorShape& x_shape = X->Shape(); - ONNXRUNTIME_RETURN_IF_NOT(x_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3."); + ORT_RETURN_IF_NOT(x_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3."); std::vector pads = pads_; std::vector kernel_shape = kernel_shape_; diff --git a/onnxruntime/core/providers/cpu/nn/pool_base.h b/onnxruntime/core/providers/cpu/nn/pool_base.h index 124ea992e5a37..ae8a519c6fcdb 100644 --- a/onnxruntime/core/providers/cpu/nn/pool_base.h +++ b/onnxruntime/core/providers/cpu/nn/pool_base.h @@ -27,7 +27,7 @@ class PoolProcessContext { friend class LpPool; PoolProcessContext() {} void init(const OpKernelInfo& info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("p", &p_).IsOK()); + ORT_ENFORCE(info.GetAttr("p", &p_).IsOK()); } }; @@ -104,11 +104,11 @@ class PoolBase { global_pooling_ = (op_name_ == "GlobalAveragePool" || op_name_ == "GlobalMaxPool" || op_name_ == "GlobalLpPool"); if (!global_pooling_) { - ONNXRUNTIME_ENFORCE(info.GetAttrs("kernel_shape", kernel_shape_).IsOK(), + ORT_ENFORCE(info.GetAttrs("kernel_shape", kernel_shape_).IsOK(), "No kernel shape is set."); std::string auto_padding; - ONNXRUNTIME_ENFORCE(info.GetAttr("auto_pad", &auto_padding).IsOK()); + ORT_ENFORCE(info.GetAttr("auto_pad", &auto_padding).IsOK()); auto_pad_ = StringToAutoPadType(auto_padding); if (!info.GetAttrs("pads", pads_).IsOK() || pads_.empty()) { @@ -121,7 +121,7 @@ class PoolBase { if (op_name_ == "AveragePool") { int64_t temp; - ONNXRUNTIME_ENFORCE(info.GetAttr("count_include_pad", &temp).IsOK()); + ORT_ENFORCE(info.GetAttr("count_include_pad", &temp).IsOK()); count_include_pad_ = (temp != 0); } @@ -134,12 +134,12 @@ class PoolBase { } for (size_t dim = 0; dim < kernel_shape_.size(); ++dim) { - ONNXRUNTIME_ENFORCE(kernel_shape_[dim] > 0); - ONNXRUNTIME_ENFORCE(pads_[dim] < kernel_shape_[dim] && pads_[dim + kernel_shape_.size()] < kernel_shape_[dim], + ORT_ENFORCE(kernel_shape_[dim] > 0); + ORT_ENFORCE(pads_[dim] < kernel_shape_[dim] && pads_[dim + kernel_shape_.size()] < kernel_shape_[dim], "Pad should be smaller than kernel."); } - ONNXRUNTIME_ENFORCE(strides_.size() == kernel_shape_.size()); + ORT_ENFORCE(strides_.size() == kernel_shape_.size()); } } @@ -148,7 +148,7 @@ class PoolBase { std::vector SetOutputSize(const TensorShape& input_shape, int64_t output_channel, std::vector* pads) const { - ONNXRUNTIME_ENFORCE(input_shape.Size() > 0); + ORT_ENFORCE(input_shape.Size() > 0); std::vector output_dims; int64_t N = input_shape[0]; InferOutputSize(input_shape.GetDims(), &output_dims, pads); @@ -161,7 +161,7 @@ class PoolBase { inline void InferOutputSize(const std::vector& input_dims, std::vector* output_dims, std::vector* pads) const { - ONNXRUNTIME_ENFORCE(input_dims.size() >= 2); + ORT_ENFORCE(input_dims.size() >= 2); if (global_pooling_) { output_dims->assign(input_dims.size() - 2, 1); } else { @@ -208,7 +208,7 @@ class PoolBase { break; } default: { - ONNXRUNTIME_THROW("Unsupported AutoPad Type."); + ORT_THROW("Unsupported AutoPad Type."); } } } else { diff --git a/onnxruntime/core/providers/cpu/nn/roi_pool.cc b/onnxruntime/core/providers/cpu/nn/roi_pool.cc index 4ab1b0bd03bfc..0a127778038a5 100644 --- a/onnxruntime/core/providers/cpu/nn/roi_pool.cc +++ b/onnxruntime/core/providers/cpu/nn/roi_pool.cc @@ -24,7 +24,7 @@ Status RoiPool::Compute(OpKernelContext* context) const { int num_rois = 
static_cast(R->Shape()[0]); // Each ROI is of the form [batch_index x1 y1 x2 y2] - ONNXRUNTIME_ENFORCE(R->Shape()[1] == 5); + ORT_ENFORCE(R->Shape()[1] == 5); std::vector output_dims({num_rois, channels, pooled_height_, pooled_width_}); @@ -41,8 +41,8 @@ Status RoiPool::Compute(OpKernelContext* context) const { int roi_start_h = static_cast(round(rois[2] * spatial_scale_)); int roi_end_w = static_cast(round(rois[3] * spatial_scale_)); int roi_end_h = static_cast(round(rois[4] * spatial_scale_)); - ONNXRUNTIME_ENFORCE(roi_batch_id >= 0); - ONNXRUNTIME_ENFORCE(roi_batch_id < batch_size); + ORT_ENFORCE(roi_batch_id >= 0); + ORT_ENFORCE(roi_batch_id < batch_size); // Force malformed ROIs to be 1x1 int roi_height = std::max(roi_end_h - roi_start_h + 1, 1); diff --git a/onnxruntime/core/providers/cpu/nn/roi_pool.h b/onnxruntime/core/providers/cpu/nn/roi_pool.h index 73e775cc00368..46e64517dab33 100644 --- a/onnxruntime/core/providers/cpu/nn/roi_pool.h +++ b/onnxruntime/core/providers/cpu/nn/roi_pool.h @@ -14,16 +14,16 @@ class RoiPool : public OpKernel { public: RoiPool(const OpKernelInfo& info) : OpKernel(info) { std::vector pooled_shape; - ONNXRUNTIME_ENFORCE(info.GetAttrs("pooled_shape", pooled_shape).IsOK()); - ONNXRUNTIME_ENFORCE(pooled_shape.size() == 2); + ORT_ENFORCE(info.GetAttrs("pooled_shape", pooled_shape).IsOK()); + ORT_ENFORCE(pooled_shape.size() == 2); pooled_height_ = pooled_shape[0]; pooled_width_ = pooled_shape[1]; - ONNXRUNTIME_ENFORCE(pooled_height_ > 0); - ONNXRUNTIME_ENFORCE(pooled_width_ > 0); + ORT_ENFORCE(pooled_height_ > 0); + ORT_ENFORCE(pooled_width_ > 0); - ONNXRUNTIME_ENFORCE(info.GetAttr("spatial_scale", &spatial_scale_).IsOK()); - ONNXRUNTIME_ENFORCE(spatial_scale_ > 0); + ORT_ENFORCE(info.GetAttr("spatial_scale", &spatial_scale_).IsOK()); + ORT_ENFORCE(spatial_scale_ > 0); } ~RoiPool() override = default; diff --git a/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc b/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc index a332aa2c33b14..8e0424195d3d2 100644 --- a/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc +++ b/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc @@ -49,12 +49,12 @@ bool PrepareForReduce(OpKernelContext* ctx, bool keepdims_, bool check_no_transpose = false) { const Tensor* input_tensor_ptr = ctx->Input(0); - ONNXRUNTIME_ENFORCE(input_tensor_ptr != nullptr); + ORT_ENFORCE(input_tensor_ptr != nullptr); const Tensor& input = *input_tensor_ptr; size_t ndim = input.Shape().GetDims().size(); for (int64_t axe : axes_) { - ONNXRUNTIME_ENFORCE(axe >= 0 && axe < (int64_t)ndim, "Axis attribute out of range"); + ORT_ENFORCE(axe >= 0 && axe < (int64_t)ndim, "Axis attribute out of range"); } std::vector axes = axes_; diff --git a/onnxruntime/core/providers/cpu/reduction/reduction_ops.h b/onnxruntime/core/providers/cpu/reduction/reduction_ops.h index 9c3b986c10052..404275f7ba07d 100644 --- a/onnxruntime/core/providers/cpu/reduction/reduction_ops.h +++ b/onnxruntime/core/providers/cpu/reduction/reduction_ops.h @@ -20,7 +20,7 @@ class ReduceKernelBase { axes_.push_back(v); } int64_t keepdims = 1; - ONNXRUNTIME_ENFORCE(info.GetAttr("keepdims", &keepdims).IsOK()); + ORT_ENFORCE(info.GetAttr("keepdims", &keepdims).IsOK()); keepdims_ = (keepdims == 1); } diff --git a/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.cc b/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.cc index 3ff458482005f..16a71586349a7 100644 --- a/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.cc +++ b/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.cc @@ 
-273,9 +273,9 @@ Status DeepCpuGruOp::Compute(OpKernelContext* context) const { else if (data_type == DataTypeImpl::GetType()) { /* Need to update all the helpers to support double... status = ComputeImpl(*context); */ - ONNXRUNTIME_NOT_IMPLEMENTED("GRU operator does not support double yet"); + ORT_NOT_IMPLEMENTED("GRU operator does not support double yet"); } else - ONNXRUNTIME_THROW("Invalid data type for GRU operator of ", data_type); + ORT_THROW("Invalid data type for GRU operator of ", data_type); return status; } @@ -300,7 +300,7 @@ Status DeepCpuGruOp::ComputeImpl(OpKernelContext& context) const { int input_size = gsl::narrow(X_shape[2]); auto status = ValidateCommonRnnInputs(X, W, R, B, 3, sequence_lens, initial_h, num_directions_, hidden_size_); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); // GRU outputs are optional but must be in the same order TensorShape Y_dims{seq_length, num_directions_, batch_size, hidden_size_}; @@ -311,7 +311,7 @@ Status DeepCpuGruOp::ComputeImpl(OpKernelContext& context) const { AllocatorPtr alloc; status = context.GetTempSpaceAllocator(&alloc); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); gsl::span input_weights = W.DataAsSpan(); gsl::span recurrent_weights = R.DataAsSpan(); gsl::span bias = B != nullptr ? B->DataAsSpan() : gsl::span(); @@ -483,7 +483,7 @@ UniDirectionalGru::UniDirectionalGru(AllocatorPtr allocator, // replicate what we just wrote to the start of the output span so we have batch_size_ copies auto values = output.cbegin(); - ONNXRUNTIME_IGNORE_RETURN_VALUE(RepeatVectorToConstructArray(values, values + hidden_size_, + ORT_IGNORE_RETURN_VALUE(RepeatVectorToConstructArray(values, values + hidden_size_, output.begin() + hidden_size_, // skip the first batch batch_size_ - 1)); // and replicate batch size - 1 times }; @@ -495,8 +495,8 @@ UniDirectionalGru::UniDirectionalGru(AllocatorPtr allocator, // how we treat the h weight depends on whether linear_before_reset_ is set if (linear_before_reset_) { // need to replicate Wb[o] and Rb[o] separately - ONNXRUNTIME_IGNORE_RETURN_VALUE(RepeatVectorToConstructArray(bias_Wo.cbegin(), bias_Wo.cend(), batched_bias_Wh_.begin(), batch_size_)); - ONNXRUNTIME_IGNORE_RETURN_VALUE(RepeatVectorToConstructArray(bias_Ro.cbegin(), bias_Ro.cend(), batched_bias_Rh_.begin(), batch_size_)); + ORT_IGNORE_RETURN_VALUE(RepeatVectorToConstructArray(bias_Wo.cbegin(), bias_Wo.cend(), batched_bias_Wh_.begin(), batch_size_)); + ORT_IGNORE_RETURN_VALUE(RepeatVectorToConstructArray(bias_Ro.cbegin(), bias_Ro.cend(), batched_bias_Rh_.begin(), batch_size_)); } else { combine_and_replicate(bias_Wo, bias_Ro, batched_bias_WRh_); } @@ -1085,7 +1085,7 @@ void UniDirectionalGru::SetNumThreads() { VLOGS(logger_, 1) << "Hidden Threads : " << hidden_num_threads_; } - ONNXRUNTIME_ENFORCE(hidden_num_threads_ >= 1); + ORT_ENFORCE(hidden_num_threads_ >= 1); } } // namespace detail } // namespace onnxruntime diff --git a/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.h b/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.h index 5deb6a05cec06..cef9b6452a3ca 100644 --- a/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.h +++ b/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.h @@ -19,13 +19,13 @@ class DeepCpuGruOp final : public OpKernel { DeepCpuGruOp(const OpKernelInfo& info) : OpKernel(info) { // required attributes std::string direction; - ONNXRUNTIME_ENFORCE(info.GetAttr("direction", &direction).IsOK()); + ORT_ENFORCE(info.GetAttr("direction", &direction).IsOK()); int64_t int64_value; - 
ONNXRUNTIME_ENFORCE(info.GetAttr("linear_before_reset", &int64_value).IsOK()); + ORT_ENFORCE(info.GetAttr("linear_before_reset", &int64_value).IsOK()); linear_before_reset_ = gsl::narrow(int64_value); - ONNXRUNTIME_ENFORCE(info.GetAttr("hidden_size", &int64_value).IsOK() && int64_value > 0); + ORT_ENFORCE(info.GetAttr("hidden_size", &int64_value).IsOK() && int64_value > 0); hidden_size_ = gsl::narrow(int64_value); // optional attributes @@ -34,7 +34,7 @@ class DeepCpuGruOp final : public OpKernel { std::vector activation_func_betas = info.GetAttrsOrDefault("activation_beta"); clip_ = info.GetAttrOrDefault("clip", std::numeric_limits::max()); - ONNXRUNTIME_ENFORCE(clip_ > 0.f); + ORT_ENFORCE(clip_ > 0.f); direction_ = rnn::detail::MakeDirection(direction); num_directions_ = direction_ == rnn::detail::Direction::kBidirectional ? 2 : 1; @@ -46,7 +46,7 @@ class DeepCpuGruOp final : public OpKernel { } } - ONNXRUNTIME_ENFORCE(activation_func_names.size() == num_directions_ * 2); + ORT_ENFORCE(activation_func_names.size() == num_directions_ * 2); activation_funcs_ = rnn::detail::ActivationFuncs(activation_func_names, activation_func_alphas, diff --git a/onnxruntime/core/providers/cpu/rnn/deep_cpu_lstm.cc b/onnxruntime/core/providers/cpu/rnn/deep_cpu_lstm.cc index 1a2ae2fd9d38b..06560b72cfab3 100644 --- a/onnxruntime/core/providers/cpu/rnn/deep_cpu_lstm.cc +++ b/onnxruntime/core/providers/cpu/rnn/deep_cpu_lstm.cc @@ -314,9 +314,9 @@ DeepCpuLstmOp::Compute(OpKernelContext* context) const { else if (data_type == DataTypeImpl::GetType()) { /* Need to update all the helpers to support double... status = ComputeImpl(*context); */ - ONNXRUNTIME_NOT_IMPLEMENTED("LSTM operator does not support double yet"); + ORT_NOT_IMPLEMENTED("LSTM operator does not support double yet"); } else - ONNXRUNTIME_THROW("Invalid data type for LSTM operator of ", data_type); + ORT_THROW("Invalid data type for LSTM operator of ", data_type); return status; } @@ -350,7 +350,7 @@ Status DeepCpuLstmOp::ComputeImpl(OpKernelContext& context) const { int input_size = gsl::narrow(X_shape[2]); Status status = ValidateInputs(X, W, R, B, sequence_lens, initial_h, initial_c, P, batch_size); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); // LSTM outputs are optional but must be in the same order TensorShape Y_dims{seq_length, num_directions_, batch_size, hidden_size_}; @@ -364,7 +364,7 @@ Status DeepCpuLstmOp::ComputeImpl(OpKernelContext& context) const { AllocatorPtr alloc; status = context.GetTempSpaceAllocator(&alloc); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); gsl::span input_weights = W.DataAsSpan(); gsl::span recurrent_weights = R.DataAsSpan(); @@ -507,7 +507,7 @@ Status DeepCpuLstmOp::ValidateInputs(const Tensor& X, const Tensor& W, const Ten const Tensor* P, int batch_size) const { auto status = rnn::detail::ValidateCommonRnnInputs(X, W, R, B, 4, sequence_lens, initial_h, num_directions_, hidden_size_); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); if (initial_c != nullptr) { auto& initial_c_shape = initial_c->Shape(); @@ -517,7 +517,7 @@ Status DeepCpuLstmOp::ValidateInputs(const Tensor& X, const Tensor& W, const Ten initial_c_shape[1] != batch_size || initial_c_shape[2] != hidden_size_) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input initial_c must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input initial_c must have shape {", num_directions_, ",", batch_size, ",", hidden_size_, "}. 
Actual:", initial_c_shape); } @@ -528,7 +528,7 @@ Status DeepCpuLstmOp::ValidateInputs(const Tensor& X, const Tensor& W, const Ten p_shape[0] != num_directions_ || p_shape[1] != 3 * hidden_size_) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input P must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input P must have shape {", num_directions_, ",", 3 * hidden_size_, "}. Actual:", p_shape); } diff --git a/onnxruntime/core/providers/cpu/rnn/deep_cpu_lstm.h b/onnxruntime/core/providers/cpu/rnn/deep_cpu_lstm.h index 790268b94acf1..3d2b186de9d49 100644 --- a/onnxruntime/core/providers/cpu/rnn/deep_cpu_lstm.h +++ b/onnxruntime/core/providers/cpu/rnn/deep_cpu_lstm.h @@ -18,17 +18,17 @@ class DeepCpuLstmOp final : public OpKernel { DeepCpuLstmOp(const OpKernelInfo& info) : OpKernel(info), clip_(info.GetAttrOrDefault("clip", std::numeric_limits::max())) { std::string direction; - ONNXRUNTIME_ENFORCE(info.GetAttr("direction", &direction).IsOK()); + ORT_ENFORCE(info.GetAttr("direction", &direction).IsOK()); int64_t int64_value; - ONNXRUNTIME_ENFORCE(info.GetAttr("hidden_size", &int64_value).IsOK() && int64_value > 0); + ORT_ENFORCE(info.GetAttr("hidden_size", &int64_value).IsOK() && int64_value > 0); hidden_size_ = gsl::narrow(int64_value); // optional attributes std::vector activation_func_names = info.GetAttrsOrDefault("activations"); std::vector activation_func_alphas = info.GetAttrsOrDefault("activation_alpha"); std::vector activation_func_betas = info.GetAttrsOrDefault("activation_beta"); - ONNXRUNTIME_ENFORCE(clip_ > 0.f); + ORT_ENFORCE(clip_ > 0.f); if (info.GetAttr("input_forget", &int64_value).IsOK()) input_forget_ = int64_value != 0; @@ -44,7 +44,7 @@ class DeepCpuLstmOp final : public OpKernel { } } - ONNXRUNTIME_ENFORCE(activation_func_names.size() == num_directions_ * 3); + ORT_ENFORCE(activation_func_names.size() == num_directions_ * 3); activation_funcs_ = rnn::detail::ActivationFuncs(activation_func_names, activation_func_alphas, diff --git a/onnxruntime/core/providers/cpu/rnn/rnn.cc b/onnxruntime/core/providers/cpu/rnn/rnn.cc index 4a3f2f5ee050d..c64ebb44047b1 100644 --- a/onnxruntime/core/providers/cpu/rnn/rnn.cc +++ b/onnxruntime/core/providers/cpu/rnn/rnn.cc @@ -117,7 +117,7 @@ Status RNN::Compute(OpKernelContext* ctx) const { auto status = rnn::detail::ValidateCommonRnnInputs(X, W, R, B, 1, sequence_lens, initial_h, num_directions, hidden_size_); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); // RNN outputs are optional std::vector Y_dims({seq_length, num_directions, batch_size, hidden_size_}); @@ -127,7 +127,7 @@ Status RNN::Compute(OpKernelContext* ctx) const { Tensor* Y_h = ctx->Output(1, Y_h_dims); AllocatorPtr alloc; - ONNXRUNTIME_RETURN_IF_ERROR(ctx->GetTempSpaceAllocator(&alloc)); + ORT_RETURN_IF_ERROR(ctx->GetTempSpaceAllocator(&alloc)); // X * W^t, each direction has shape of [seq_length, batch_size, hidden_size] auto x_matmul_data = alloc->Alloc(sizeof(float) * seq_length * batch_size * hidden_size_); diff --git a/onnxruntime/core/providers/cpu/rnn/rnn.h b/onnxruntime/core/providers/cpu/rnn/rnn.h index 5c59e1a42df87..3e292c75a389c 100644 --- a/onnxruntime/core/providers/cpu/rnn/rnn.h +++ b/onnxruntime/core/providers/cpu/rnn/rnn.h @@ -18,15 +18,15 @@ class RNN : public OpKernel { public: RNN(const OpKernelInfo& info) : OpKernel(info), clip_(info.GetAttrOrDefault("clip", -1.0f)) { - ONNXRUNTIME_ENFORCE(info.GetAttr("direction", &direction_).IsOK()); - ONNXRUNTIME_ENFORCE(allowed_directions.find(direction_) != 
allowed_directions.end()); + ORT_ENFORCE(info.GetAttr("direction", &direction_).IsOK()); + ORT_ENFORCE(allowed_directions.find(direction_) != allowed_directions.end()); const int num_directions = direction_ == "bidirectional" ? 2 : 1; activation_alpha_ = info.GetAttrsOrDefault("activation_alpha", std::vector(num_directions, 0.0F)); activation_beta_ = info.GetAttrsOrDefault("activation_beta", std::vector(num_directions, 0.0F)); - ONNXRUNTIME_ENFORCE(info.GetAttrs("activations", activations_).IsOK()); + ORT_ENFORCE(info.GetAttrs("activations", activations_).IsOK()); //TODO: is it optional or not? - ONNXRUNTIME_ENFORCE(info.GetAttr("hidden_size", &hidden_size_).IsOK()); + ORT_ENFORCE(info.GetAttr("hidden_size", &hidden_size_).IsOK()); if (activations_.size() == 2 && num_directions == 1) { // ONNX RNN default activations are {"Tanh", "Tanh"} @@ -34,9 +34,9 @@ class RNN : public OpKernel { activations_.resize(1); } - ONNXRUNTIME_ENFORCE(activations_.size() == num_directions); + ORT_ENFORCE(activations_.size() == num_directions); for (int direction = 1; direction < num_directions; direction++) { - ONNXRUNTIME_ENFORCE(allowed_activations.find(activations_[direction]) != allowed_activations.end()); + ORT_ENFORCE(allowed_activations.find(activations_[direction]) != allowed_activations.end()); } } diff --git a/onnxruntime/core/providers/cpu/rnn/rnn_activation_functors.h b/onnxruntime/core/providers/cpu/rnn/rnn_activation_functors.h index dde46b425175f..8f2be9d1cf97e 100644 --- a/onnxruntime/core/providers/cpu/rnn/rnn_activation_functors.h +++ b/onnxruntime/core/providers/cpu/rnn/rnn_activation_functors.h @@ -10,7 +10,7 @@ #pragma warning(disable : 4100) #endif -#define RNN_UNUSED_PARAMETER ONNXRUNTIME_ATTRIBUTE_UNUSED = 0 +#define RNN_UNUSED_PARAMETER ORT_ATTRIBUTE_UNUSED = 0 namespace onnxruntime { namespace rnn { namespace detail { @@ -68,12 +68,12 @@ template inline T Softsign(T x, T alpha, T beta); template <> -inline float Softsign(float x, float alpha ONNXRUNTIME_ATTRIBUTE_UNUSED, float beta ONNXRUNTIME_ATTRIBUTE_UNUSED) { +inline float Softsign(float x, float alpha ORT_ATTRIBUTE_UNUSED, float beta ORT_ATTRIBUTE_UNUSED) { return x / (1 + fabs(x)); } template <> -inline double Softsign(double x, double alpha ONNXRUNTIME_ATTRIBUTE_UNUSED, double beta ONNXRUNTIME_ATTRIBUTE_UNUSED) { +inline double Softsign(double x, double alpha ORT_ATTRIBUTE_UNUSED, double beta ORT_ATTRIBUTE_UNUSED) { return x / (1 + fabs(x)); } diff --git a/onnxruntime/core/providers/cpu/rnn/rnn_helpers.cc b/onnxruntime/core/providers/cpu/rnn/rnn_helpers.cc index c455c67c99a9c..21c5c093569aa 100644 --- a/onnxruntime/core/providers/cpu/rnn/rnn_helpers.cc +++ b/onnxruntime/core/providers/cpu/rnn/rnn_helpers.cc @@ -41,13 +41,13 @@ Status ValidateCommonRnnInputs(const Tensor& X, int64_t input_size = X_shape[2]; if (X_shape.NumDimensions() != 3) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input X must have 3 dimensions only. Actual:", X_shape); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input X must have 3 dimensions only. Actual:", X_shape); if (W_shape.NumDimensions() != 3 || W_shape[0] != num_directions || W_shape[1] != hidden_size * WRB_dim_1_multipler || W_shape[2] != input_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input W must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input W must have shape {", num_directions, ",", WRB_dim_1_multipler, "*", hidden_size, ",", input_size, "}. 
Actual:", W_shape); @@ -55,7 +55,7 @@ Status ValidateCommonRnnInputs(const Tensor& X, R_shape[0] != num_directions || R_shape[1] != hidden_size * WRB_dim_1_multipler || R_shape[2] != hidden_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input R must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input R must have shape {", num_directions, ",", WRB_dim_1_multipler, "*", hidden_size, ",", hidden_size, "}. Actual:", R_shape); @@ -64,7 +64,7 @@ Status ValidateCommonRnnInputs(const Tensor& X, if (B_shape.NumDimensions() != 2 || B_shape[0] != num_directions || B_shape[1] != 2 * WRB_dim_1_multipler * hidden_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input B must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input B must have shape {", num_directions, ",", 2 * WRB_dim_1_multipler, "*", hidden_size, "}. Actual:", B_shape); } @@ -72,7 +72,7 @@ Status ValidateCommonRnnInputs(const Tensor& X, auto& sequence_lens_shape = sequence_lens->Shape(); if (sequence_lens_shape.NumDimensions() != 1 || sequence_lens_shape[0] != batch_size) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input sequence_lens must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input sequence_lens must have shape {", batch_size, "}. Actual:", sequence_lens_shape); } @@ -80,7 +80,7 @@ Status ValidateCommonRnnInputs(const Tensor& X, if (std::any_of(sequence_len_entries.cbegin(), sequence_len_entries.cend(), [seq_length](int len) { return len <= 0 || len > seq_length; })) { - return ONNXRUNTIME_MAKE_STATUS( + return ORT_MAKE_STATUS( ONNXRUNTIME, INVALID_ARGUMENT, "Invalid value/s in sequence_lens. All values must be > 0 and < seq_length. seq_length=", seq_length); } @@ -94,7 +94,7 @@ Status ValidateCommonRnnInputs(const Tensor& X, initial_h_shape[1] != batch_size || initial_h_shape[2] != hidden_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input initial_h must have shape {", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input initial_h must have shape {", num_directions, ",", batch_size, ",", hidden_size, "}. Actual:", initial_h_shape); } @@ -133,7 +133,7 @@ std::string NormalizeActivationArgumentAndGetAlphaBetaCount(const std::string& a auto usage_entry = NameToArgUsageMap.find(name); if (usage_entry == NameToArgUsageMap.end()) { - ONNXRUNTIME_THROW( + ORT_THROW( "Expecting activation to be one of Affine, Relu, LeakyRelu, " "ThresholdedRelu, Tanh, ScaledTanh, Sigmoid, HardSigmoid, " "Elu, Softsign, Softplus. 
Got " + @@ -262,9 +262,9 @@ inline void clip_for_tanh(const float* ps, float* pd, int c) { } void add_bias_into_ignore(const float* ps, float* pd, const int c) { - ONNXRUNTIME_UNUSED_PARAMETER(ps); - ONNXRUNTIME_UNUSED_PARAMETER(pd); - ONNXRUNTIME_UNUSED_PARAMETER(c); + ORT_UNUSED_PARAMETER(ps); + ORT_UNUSED_PARAMETER(pd); + ORT_UNUSED_PARAMETER(c); } void add_bias_into(const float* ps, float* pd, const int c) { @@ -284,7 +284,7 @@ void clip(const float b, float* pd, const int c) { } void clip_ignore_bias(const float b, const float* pb, float* pd, const int c) { - ONNXRUNTIME_UNUSED_PARAMETER(pb); + ORT_UNUSED_PARAMETER(pb); for (int i = 0; i < c; i++) { float x = pd[i]; @@ -311,8 +311,8 @@ void clip_add_bias(const float b, const float* pb, float* pd, const int c) { void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); clip_for_sigmoid(ps1, ps1_c, c); @@ -335,8 +335,8 @@ void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); clip_for_tanh(ps1, ps1_c, c); @@ -359,9 +359,9 @@ void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, void relu_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(ps1_c); - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(ps1_c); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); for (int i = 0; i < c; i++) { const float max = ps1[i] > 0 ? 
ps1[i] : 0.0f; @@ -372,7 +372,7 @@ void relu_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, void composed_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, std::function func, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(ps1_c); + ORT_UNUSED_PARAMETER(ps1_c); for (int i = 0; i < c; i++) { pd[i] = ps2[i] * func(ps1[i], alpha, beta); } @@ -380,9 +380,9 @@ void composed_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int void sigmoid_exact_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(ps1_c); - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(ps1_c); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); for (int i = 0; i < c; i++) { float x = ps1[i]; @@ -392,9 +392,9 @@ void sigmoid_exact_m(const float* ps1, float* ps1_c, const float* ps2, float* pd void tanh_exact_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(ps1_c); - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(ps1_c); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); for (int i = 0; i < c; i++) { pd[i] = ::std::tanh(ps1[i]) * ps2[i]; @@ -402,8 +402,8 @@ void tanh_exact_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, i } void sigmoid(float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); clip_for_sigmoid_in_place(pd, c); @@ -425,8 +425,8 @@ void sigmoid(float* pd, int c, const float alpha, const float beta) { } void tanh(float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); clip_for_tanh_in_place(pd, c); @@ -448,8 +448,8 @@ void tanh(float* pd, int c, const float alpha, const float beta) { } void relu(float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); for (int i = 0; i < c; i++) { if (pd[i] < 0) @@ -458,8 +458,8 @@ void relu(float* pd, int c, const float alpha, const float beta) { } void sigmoid_exact(float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); for (int i = 0; i < c; i++) { float x = pd[i]; @@ -468,8 +468,8 @@ void sigmoid_exact(float* pd, int c, const float alpha, const float beta) { } void tanh_exact(float* pd, int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); for (int i = 0; i < c; i++) { float x = pd[i]; @@ -486,8 +486,8 @@ void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); clip_for_tanh_in_place(ps2, c); @@ -510,8 +510,8 @@ void gru_reset_gate_tanh(const 
float* ps1, float* ps2, float* pd, const int c, void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); clip_for_sigmoid_in_place(ps2, c); @@ -534,8 +534,8 @@ void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, const int c void gru_reset_gate_relu(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); for (int i = 0; i < c; i++) { const auto max = ps2[i] > 0 ? ps2[i] : 0.0f; @@ -553,8 +553,8 @@ void gru_reset_gate_composed(const float* ps1, float* ps2, float* pd, const int void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); clip_for_tanh_in_place(ph, c); @@ -577,8 +577,8 @@ void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po void gru_output_gate_relu(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); for (int i = 0; i < c; i++) { float max = ph[i] > 0 ? ph[i] : 0.0f; @@ -596,8 +596,8 @@ void gru_output_gate_composed(float* ph, const float* pz, const float* ps, float void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta) { - ONNXRUNTIME_UNUSED_PARAMETER(alpha); - ONNXRUNTIME_UNUSED_PARAMETER(beta); + ORT_UNUSED_PARAMETER(alpha); + ORT_UNUSED_PARAMETER(beta); clip_for_sigmoid_in_place(ph, c); @@ -700,7 +700,7 @@ ActivationFuncPtr ActivationFuncByName(const std::string& func) { composed_activation_func(ps, c, Softplus, alpha, beta); }; - ONNXRUNTIME_THROW("Invalid activation function of ", func); + ORT_THROW("Invalid activation function of ", func); } LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func) { @@ -761,7 +761,7 @@ LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func) { composed_m(ps1, ps1_c, ps2, ps3, c, Softplus, alpha, beta); }; - ONNXRUNTIME_THROW("Invalid LSTM merge activation function of ", func); + ORT_THROW("Invalid LSTM merge activation function of ", func); } GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func) { @@ -814,7 +814,7 @@ GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func) { gru_reset_gate_composed(ps1, ps2, ps3, c, Softplus, alpha, beta); }; - ONNXRUNTIME_THROW("Invalid GRU reset gate activation function: ", func); + ORT_THROW("Invalid GRU reset gate activation function: ", func); } GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func) { @@ -875,7 +875,7 @@ GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func) { gru_output_gate_composed(ps1, ps2, ph, ps3, c, Softplus, alpha, beta); }; - ONNXRUNTIME_THROW("Invalid GRU hidden gate activation function: ", func); + ORT_THROW("Invalid GRU hidden gate activation function: ", func); } } // namespace deepcpu diff --git a/onnxruntime/core/providers/cpu/rnn/rnn_helpers.h b/onnxruntime/core/providers/cpu/rnn/rnn_helpers.h 
index fe56ae7ee7c1c..65632d6b5f7bc 100644 --- a/onnxruntime/core/providers/cpu/rnn/rnn_helpers.h +++ b/onnxruntime/core/providers/cpu/rnn/rnn_helpers.h @@ -40,7 +40,7 @@ inline Direction MakeDirection(const std::string& direction) { } else if (direction == "bidirectional") { return kBidirectional; } else { - ONNXRUNTIME_THROW("Invalid 'direction' argument of '", direction, + ORT_THROW("Invalid 'direction' argument of '", direction, "'. Must be one of 'forward', 'reverse', or 'bidirectional'."); } } @@ -154,10 +154,10 @@ void ComputeGemm(const int M, const int ldc) { // validate all the inputs // need to use the lda/ldb/ldc strides which should be >= the columns for the span - ONNXRUNTIME_ENFORCE(lda >= K && ldb >= K && ldc >= N); - ONNXRUNTIME_ENFORCE(A + (M * lda - (lda - K)) <= A_end); - ONNXRUNTIME_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end); - ONNXRUNTIME_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end); + ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N); + ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end); + ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end); + ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end); ::onnxruntime::math::GemmEx( CblasNoTrans, CblasTrans, @@ -173,7 +173,7 @@ template const T* SafeRawConstPointer(typename gsl::span::const_iterator cur, typename gsl::span::const_iterator end, size_t size) { - ONNXRUNTIME_ENFORCE(cur + size <= end); + ORT_ENFORCE(cur + size <= end); return &*cur; } @@ -181,7 +181,7 @@ const T* SafeRawConstPointer(typename gsl::span::const_iterator cur, // after validating the memory covered by the span supports the size required template const T* SafeRawConstPointer(gsl::span span, size_t offset, size_t size) { - ONNXRUNTIME_ENFORCE(offset + size <= size_t(span.size())); + ORT_ENFORCE(offset + size <= size_t(span.size())); return span.data(); } @@ -191,7 +191,7 @@ template T* SafeRawPointer(typename gsl::span::iterator cur, typename gsl::span::iterator end, size_t size) { - ONNXRUNTIME_ENFORCE(cur + size <= end); + ORT_ENFORCE(cur + size <= end); return &*cur; } @@ -199,7 +199,7 @@ T* SafeRawPointer(typename gsl::span::iterator cur, // after validating the memory covered by the span supports the size required template T* SafeRawPointer(typename gsl::span span, size_t offset, size_t size) { - ONNXRUNTIME_ENFORCE(offset + size <= size_t(span.size())); + ORT_ENFORCE(offset + size <= size_t(span.size())); return span.data() + offset; } @@ -209,8 +209,8 @@ void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, i // #define NOTHREADS to execute the lambdas directly and in order if you need to do that to debug #ifdef NOTHREADS - ONNXRUNTIME_UNUSED_PARAMETER(ttp); - ONNXRUNTIME_UNUSED_PARAMETER(logger); + ORT_UNUSED_PARAMETER(ttp); + ORT_UNUSED_PARAMETER(logger); for (int i = 0; i < max; i += step) { (void)name; diff --git a/onnxruntime/core/providers/cpu/symbols.txt b/onnxruntime/core/providers/cpu/symbols.txt index f8c7a1349175d..f60ba073a64bf 100644 --- a/onnxruntime/core/providers/cpu/symbols.txt +++ b/onnxruntime/core/providers/cpu/symbols.txt @@ -1,71 +1,71 @@ -ONNXRuntimeAddCustomOp -ONNXRuntimeAddRefToObject -ONNXRuntimeAllocatorAlloc -ONNXRuntimeAllocatorFree -ONNXRuntimeAllocatorGetInfo -ONNXRuntimeAllocatorInfoGetId -ONNXRuntimeAllocatorInfoGetMemType -ONNXRuntimeAllocatorInfoGetName -ONNXRuntimeAllocatorInfoGetType -ONNXRuntimeCastTypeInfoToTensorInfo -ONNXRuntimeCloneSessionOptions -ONNXRuntimeCompareAllocatorInfo -ONNXRuntimeCreateAllocatorInfo -ONNXRuntimeCreateCpuAllocatorInfo -ONNXRuntimeCreateCpuExecutionProviderFactory 
-ONNXRuntimeCreateDefaultAllocator -ONNXRuntimeCreateInferenceSession -ONNXRuntimeCreateRunOptions -ONNXRuntimeCreateSessionOptions -ONNXRuntimeCreateTensorAsONNXValue -ONNXRuntimeCreateTensorTypeAndShapeInfo -ONNXRuntimeCreateTensorWithDataAsONNXValue -ONNXRuntimeDisableCpuMemArena -ONNXRuntimeDisableMemPattern -ONNXRuntimeDisableProfiling -ONNXRuntimeDisableSequentialExecution -ONNXRuntimeEnableCpuMemArena -ONNXRuntimeEnableMemPattern -ONNXRuntimeEnableProfiling -ONNXRuntimeEnableSequentialExecution -ONNXRuntimeFillStringTensor -ONNXRuntimeGetDimensions -ONNXRuntimeGetErrorCode -ONNXRuntimeGetErrorMessage -ONNXRuntimeGetNumOfDimensions -ONNXRuntimeGetStringTensorContent -ONNXRuntimeGetStringTensorDataLength -ONNXRuntimeGetTensorElementType -ONNXRuntimeGetTensorMutableData -ONNXRuntimeGetTensorShapeAndType -ONNXRuntimeGetTensorShapeElementCount -ONNXRuntimeGetTypeInfo -ONNXRuntimeGetValueType -ONNXRuntimeInferenceSessionGetInputCount -ONNXRuntimeInferenceSessionGetInputName -ONNXRuntimeInferenceSessionGetInputTypeInfo -ONNXRuntimeInferenceSessionGetOutputCount -ONNXRuntimeInferenceSessionGetOutputName -ONNXRuntimeInferenceSessionGetOutputTypeInfo -ONNXRuntimeInitialize -ONNXRuntimeInitializeWithCustomLogger -ONNXRuntimeIsTensor -ONNXRuntimeReleaseObject -ONNXRuntimeRunInference -ONNXRuntimeRunOptionsGetRunLogVerbosityLevel -ONNXRuntimeRunOptionsGetRunTag -ONNXRuntimeRunOptionsSetRunLogVerbosityLevel -ONNXRuntimeRunOptionsSetRunTag -ONNXRuntimeRunOptionsSetTerminate -ONNXRuntimeSessionOptionsAppendExecutionProvider -ONNXRuntimeSetDims -ONNXRuntimeSetSessionLogId -ONNXRuntimeSetSessionLogVerbosityLevel -ONNXRuntimeSetSessionThreadPoolSize -ONNXRuntimeSetTensorElementType -ONNXRuntimeTensorProtoToONNXValue +OrtAddCustomOp +OrtAddRefToObject +OrtAllocatorAlloc +OrtAllocatorFree +OrtAllocatorGetInfo +OrtAllocatorInfoGetId +OrtAllocatorInfoGetMemType +OrtAllocatorInfoGetName +OrtAllocatorInfoGetType +OrtCastTypeInfoToTensorInfo +OrtCloneSessionOptions +OrtCompareAllocatorInfo +OrtCreateAllocatorInfo +OrtCreateCpuAllocatorInfo +OrtCreateCpuExecutionProviderFactory +OrtCreateDefaultAllocator +OrtCreateInferenceSession +OrtCreateRunOptions +OrtCreateSessionOptions +OrtCreateTensorAsONNXValue +OrtCreateTensorTypeAndShapeInfo +OrtCreateTensorWithDataAsONNXValue +OrtDisableCpuMemArena +OrtDisableMemPattern +OrtDisableProfiling +OrtDisableSequentialExecution +OrtEnableCpuMemArena +OrtEnableMemPattern +OrtEnableProfiling +OrtEnableSequentialExecution +OrtFillStringTensor +OrtGetDimensions +OrtGetErrorCode +OrtGetErrorMessage +OrtGetNumOfDimensions +OrtGetStringTensorContent +OrtGetStringTensorDataLength +OrtGetTensorElementType +OrtGetTensorMutableData +OrtGetTensorShapeAndType +OrtGetTensorShapeElementCount +OrtGetTypeInfo +OrtGetValueType +OrtInferenceSessionGetInputCount +OrtInferenceSessionGetInputName +OrtInferenceSessionGetInputTypeInfo +OrtInferenceSessionGetOutputCount +OrtInferenceSessionGetOutputName +OrtInferenceSessionGetOutputTypeInfo +OrtInitialize +OrtInitializeWithCustomLogger +OrtIsTensor +OrtReleaseObject +OrtRunInference +OrtRunOptionsGetRunLogVerbosityLevel +OrtRunOptionsGetRunTag +OrtRunOptionsSetRunLogVerbosityLevel +OrtRunOptionsSetRunTag +OrtRunOptionsSetTerminate +OrtSessionOptionsAppendExecutionProvider +OrtSetDims +OrtSetSessionLogId +OrtSetSessionLogVerbosityLevel +OrtSetSessionThreadPoolSize +OrtSetTensorElementType +OrtTensorProtoToONNXValue ReleaseONNXEnv -ReleaseONNXRuntimeAllocatorInfo +ReleaseOrtAllocatorInfo ReleaseONNXSession ReleaseONNXStatus ReleaseONNXValue diff 
--git a/onnxruntime/core/providers/cpu/tensor/cast_op.cc b/onnxruntime/core/providers/cpu/tensor/cast_op.cc index 236b6c4b52413..c58b5a9182844 100644 --- a/onnxruntime/core/providers/cpu/tensor/cast_op.cc +++ b/onnxruntime/core/providers/cpu/tensor/cast_op.cc @@ -80,11 +80,11 @@ const std::vector castOpTypeConstraints{ } \ break; \ case TensorProto_DataType_STRING: \ - ONNXRUNTIME_THROW("Casting to and from strings is not supported yet."); /*break;*/ \ + ORT_THROW("Casting to and from strings is not supported yet."); /*break;*/ \ case TensorProto_DataType_UNDEFINED: \ - ONNXRUNTIME_THROW("Cast op must have 'to' argument of type DataType"); /*break;*/ \ + ORT_THROW("Cast op must have 'to' argument of type DataType"); /*break;*/ \ default: \ - ONNXRUNTIME_THROW("Unexpected 'to' argument value: ", to_); \ + ORT_THROW("Unexpected 'to' argument value: ", to_); \ } \ return Status::OK(); \ } @@ -161,11 +161,11 @@ Status Cast::Compute(OpKernelContext* context) const { st = CastFloat16Data(X, Y, shape, context); break; case TensorProto_DataType_STRING: - ONNXRUNTIME_THROW("Casting to and from strings is not supported yet."); /*break;*/ + ORT_THROW("Casting to and from strings is not supported yet."); /*break;*/ case TensorProto_DataType_UNDEFINED: - ONNXRUNTIME_THROW("Cast op must have 'to' argument of type DataType"); /*break;*/ + ORT_THROW("Cast op must have 'to' argument of type DataType"); /*break;*/ default: - ONNXRUNTIME_THROW("Unexpected 'to' argument value: ", to_); + ORT_THROW("Unexpected 'to' argument value: ", to_); } return st; } diff --git a/onnxruntime/core/providers/cpu/tensor/cast_op.h b/onnxruntime/core/providers/cpu/tensor/cast_op.h index 83e2b560a981f..e75c08790cb9e 100644 --- a/onnxruntime/core/providers/cpu/tensor/cast_op.h +++ b/onnxruntime/core/providers/cpu/tensor/cast_op.h @@ -50,11 +50,11 @@ inline void CastData(const Tensor* in, Tensor* out, const Tens template inline void CastFloat16Data(const Tensor* in, Tensor* out, const TensorShape& shape, const AllocatorPtr& allocator) { - ONNXRUNTIME_ENFORCE(allocator != nullptr); + ORT_ENFORCE(allocator != nullptr); const int64_t len = shape.Size(); - ONNXRUNTIME_ENFORCE(len > 0); + ORT_ENFORCE(len > 0); void* buffer = allocator->AllocArray(sizeof(float), len); - ONNXRUNTIME_ENFORCE(buffer); + ORT_ENFORCE(buffer); Tensor tmp_tensor(DataTypeImpl::GetType(), shape, buffer, allocator->Info(), nullptr); if (std::is_same::value) { CastData(in, &tmp_tensor, shape); // first cast to float @@ -72,7 +72,7 @@ class Cast final : public OpKernel { Cast(const OpKernelInfo& info) : OpKernel(info) { int64_t to; Status status = info.GetAttr("to", &to); - ONNXRUNTIME_ENFORCE(status.IsOK(), "Attribute to is not set."); + ORT_ENFORCE(status.IsOK(), "Attribute to is not set."); to_ = gsl::narrow_cast(to); } @@ -89,7 +89,7 @@ class Cast final : public OpKernel { typename DstType> Status CastFloat16Data(const Tensor* in, Tensor* out, const TensorShape& shape, OpKernelContext* context) const { AllocatorPtr allocator; - ONNXRUNTIME_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&allocator)); + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&allocator)); ::onnxruntime::CastFloat16Data(in, out, shape, allocator); return Status::OK(); } diff --git a/onnxruntime/core/providers/cpu/tensor/compress.cc b/onnxruntime/core/providers/cpu/tensor/compress.cc index 1598449e158cb..9926126a4146b 100644 --- a/onnxruntime/core/providers/cpu/tensor/compress.cc +++ b/onnxruntime/core/providers/cpu/tensor/compress.cc @@ -17,7 +17,7 @@ Status 
Compress::Compute(OpKernelContext* ctx) const { size_t rank = input_tensor->Shape().NumDimensions(); auto& input_dimensions = input_tensor->Shape().GetDims(); if (has_axis_) { - ONNXRUNTIME_ENFORCE(axis_ < static_cast(rank), "axis greater than input data dimension!"); + ORT_ENFORCE(axis_ < static_cast(rank), "axis greater than input data dimension!"); } const Tensor* condition = ctx->Input(1); diff --git a/onnxruntime/core/providers/cpu/tensor/concat.cc b/onnxruntime/core/providers/cpu/tensor/concat.cc index 54660885535d1..498d7ddda0d59 100644 --- a/onnxruntime/core/providers/cpu/tensor/concat.cc +++ b/onnxruntime/core/providers/cpu/tensor/concat.cc @@ -13,7 +13,7 @@ ONNX_CPU_OPERATOR_KERNEL( Concat); Status ConcatBase::PrepareForCompute(OpKernelContext* ctx, int input_count, Prepare& p) const { - ONNXRUNTIME_RETURN_IF_NOT(input_count >= 1, "Must have 1 or more inputs"); + ORT_RETURN_IF_NOT(input_count >= 1, "Must have 1 or more inputs"); const Tensor* tensor_pointer = ctx->Input(0); if (tensor_pointer == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch"); const Tensor& inputs_0 = *tensor_pointer; @@ -30,7 +30,7 @@ Status ConcatBase::PrepareForCompute(OpKernelContext* ctx, int input_count, Prep for (int axis_index = 0; axis_index < dimension_count; axis_index++) { if (axis_index == axis) continue; - ONNXRUNTIME_RETURN_IF_NOT(data_n.Shape()[axis_index] == inputs_0.Shape()[axis_index], "Non concat axis dimensions must match: Axis ", axis_index, " has mismatched dimensions of ", data_n.Shape()[axis_index], " and ", inputs_0.Shape()[axis_index]); + ORT_RETURN_IF_NOT(data_n.Shape()[axis_index] == inputs_0.Shape()[axis_index], "Non concat axis dimensions must match: Axis ", axis_index, " has mismatched dimensions of ", data_n.Shape()[axis_index], " and ", inputs_0.Shape()[axis_index]); } } @@ -59,10 +59,10 @@ Status ConcatBase::PrepareForCompute(OpKernelContext* ctx, int input_count, Prep for (int input_index = 0; input_index < input_count; input_index++) { const Tensor* data_n_ptr = ctx->Input(input_index); - ONNXRUNTIME_ENFORCE(data_n_ptr != nullptr); + ORT_ENFORCE(data_n_ptr != nullptr); auto& data_n = *data_n_ptr; - ONNXRUNTIME_RETURN_IF_NOT(data_n.DataType() == concat_result.DataType()); + ORT_RETURN_IF_NOT(data_n.DataType() == concat_result.DataType()); // The input_axis_pitch is the number of elements to add to move to the next split axis in the input int64_t input_axis_pitch = 1; @@ -79,7 +79,7 @@ Status Concat::Compute(OpKernelContext* ctx) const { auto input_count = Node().InputArgCount().front(); Prepare p; - ONNXRUNTIME_RETURN_IF_ERROR(PrepareForCompute(ctx, input_count, p)); + ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, input_count, p)); auto is_string_type = ctx->Input(0)->DataType() == DataTypeImpl::GetType(); diff --git a/onnxruntime/core/providers/cpu/tensor/concat.h b/onnxruntime/core/providers/cpu/tensor/concat.h index 23f1f13cd4003..6493296501138 100644 --- a/onnxruntime/core/providers/cpu/tensor/concat.h +++ b/onnxruntime/core/providers/cpu/tensor/concat.h @@ -14,7 +14,7 @@ class ConcatBase { protected: ConcatBase(const OpKernelInfo& info) { if (!info.GetAttr("axis", &axis_).IsOK()) { - ONNXRUNTIME_ENFORCE(false, "Must have valid 'axis' attribute"); + ORT_ENFORCE(false, "Must have valid 'axis' attribute"); } } diff --git a/onnxruntime/core/providers/cpu/tensor/crop.h b/onnxruntime/core/providers/cpu/tensor/crop.h index e406d44e206f7..557e76cc8e84d 100644 --- a/onnxruntime/core/providers/cpu/tensor/crop.h +++ 
b/onnxruntime/core/providers/cpu/tensor/crop.h @@ -19,14 +19,14 @@ class CropBase { Status ValidateInput(const Tensor* X) const { if (border_.size() < 4) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Attribute border needs to be specified with four border elements, got ", border_.size()); } const auto dims = X->Shape().GetDims(); if (dims.size() < 4) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input is expected to have four dimensions corresponding to [N,C,H,W], got ", dims.size()); } @@ -40,11 +40,11 @@ class CropBase { bottomBorder = border_[3]; if (H < topBorder + bottomBorder) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input's height (", H, ") needs to be greater than the topBorder (", topBorder, ") + bottomBorder (", bottomBorder, ")"); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input's height (", H, ") needs to be greater than the topBorder (", topBorder, ") + bottomBorder (", bottomBorder, ")"); } if (W < leftBorder + rightBorder) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input's width (", W, ") needs to be greater than the leftBorder (", leftBorder, ") + rightBorder (", rightBorder, ")"); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input's width (", W, ") needs to be greater than the leftBorder (", leftBorder, ") + rightBorder (", rightBorder, ")"); } int64_t bottomLimit = H - bottomBorder; @@ -56,12 +56,12 @@ class CropBase { rightLimit = leftBorder + scale_[1]; if (H < bottomLimit) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input's height (", H, ") needs to be greater than the topBorder (", topBorder, ") + scale_[0] (", scale_[0], ")"); } if (W < rightLimit) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input's width (", W, ") needs to be greater than the leftBorder (", leftBorder, ") + scale_[1] (", scale_[1], ")"); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input's width (", W, ") needs to be greater than the leftBorder (", leftBorder, ") + scale_[1] (", scale_[1], ")"); } } @@ -80,7 +80,7 @@ class Crop final : public CropBase, public OpKernel { common::Status Compute(OpKernelContext* context) const override { const Tensor* X = context->Input(0); - ONNXRUNTIME_RETURN_IF_ERROR(ValidateInput(X)); + ORT_RETURN_IF_ERROR(ValidateInput(X)); const auto dims = X->Shape().GetDims(); const int64_t N = dims[0]; diff --git a/onnxruntime/core/providers/cpu/tensor/eye_like.cc b/onnxruntime/core/providers/cpu/tensor/eye_like.cc index d93620ee37712..725f9a09990bf 100644 --- a/onnxruntime/core/providers/cpu/tensor/eye_like.cc +++ b/onnxruntime/core/providers/cpu/tensor/eye_like.cc @@ -28,7 +28,7 @@ ONNX_CPU_OPERATOR_KERNEL( Status EyeLike::Compute(OpKernelContext* context) const { const Tensor* T1 = context->Input(0); - ONNXRUNTIME_ENFORCE(T1 != nullptr); + ORT_ENFORCE(T1 != nullptr); auto output_tensor_dtype = has_dtype_ ? 
static_cast(dtype_) : utils::GetTensorProtoType(*T1); switch (output_tensor_dtype) { @@ -39,7 +39,7 @@ Status EyeLike::Compute(OpKernelContext* context) const { case onnx::TensorProto_DataType_UINT64: return ComputeImpl(context, T1); default: - ONNXRUNTIME_THROW("Unsupported 'dtype' value: ", output_tensor_dtype); + ORT_THROW("Unsupported 'dtype' value: ", output_tensor_dtype); } } diff --git a/onnxruntime/core/providers/cpu/tensor/gather.cc b/onnxruntime/core/providers/cpu/tensor/gather.cc index 5a8295c7420a1..716b31fef0140 100644 --- a/onnxruntime/core/providers/cpu/tensor/gather.cc +++ b/onnxruntime/core/providers/cpu/tensor/gather.cc @@ -42,7 +42,7 @@ Status GatherCopyData(const Tensor* indices_tensor, const uint8_t* src_base, uin for (int64_t i = 0; i < N; ++i) { Tin idx = indices_data[i]; if (idx < 0 || idx >= input_data_shape[axis]) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "indices element out of data bounds, idx=", idx, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "indices element out of data bounds, idx=", idx, " data_dim=", input_data_shape[axis]); } } @@ -70,7 +70,7 @@ Status GatherCopyData(const Tensor* indices_tensor, const uint8_t* src_base, uin Status Gather::Compute(OpKernelContext* context) const { Prepare p; - ONNXRUNTIME_RETURN_IF_ERROR(PrepareForCompute(context, p)); + ORT_RETURN_IF_ERROR(PrepareForCompute(context, p)); const TensorShape& input_data_shape = p.input_tensor->Shape(); @@ -96,7 +96,7 @@ Status Gather::Compute(OpKernelContext* context) const { block_size, M, N, data_batch_bytes, gathered_batch_bytes, input_data_shape, p.axis); } - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Type for Tind not supported yet in Gather."); + return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Type for Tind not supported yet in Gather."); } } // namespace onnxruntime diff --git a/onnxruntime/core/providers/cpu/tensor/gather.h b/onnxruntime/core/providers/cpu/tensor/gather.h index e6df9dd68ffef..0637b78e616d8 100644 --- a/onnxruntime/core/providers/cpu/tensor/gather.h +++ b/onnxruntime/core/providers/cpu/tensor/gather.h @@ -12,7 +12,7 @@ namespace onnxruntime { class GatherBase { protected: GatherBase(const OpKernelInfo& info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("axis", &axis_).IsOK(), "Missing/Invalid 'axis' attribute value"); + ORT_ENFORCE(info.GetAttr("axis", &axis_).IsOK(), "Missing/Invalid 'axis' attribute value"); } struct Prepare { diff --git a/onnxruntime/core/providers/cpu/tensor/identity_op.h b/onnxruntime/core/providers/cpu/tensor/identity_op.h index d7b54c4ba7267..19b824ebb731b 100644 --- a/onnxruntime/core/providers/cpu/tensor/identity_op.h +++ b/onnxruntime/core/providers/cpu/tensor/identity_op.h @@ -23,7 +23,7 @@ class IdentityOp final : public OpKernel { Status Compute(OpKernelContext* context) const override { const Tensor* X = context->Input(0); - ONNXRUNTIME_ENFORCE(X != nullptr); + ORT_ENFORCE(X != nullptr); const TensorShape& shape = X->Shape(); Tensor* Y = context->Output(0, shape); auto X_type = X->DataType(); diff --git a/onnxruntime/core/providers/cpu/tensor/image_scaler.h b/onnxruntime/core/providers/cpu/tensor/image_scaler.h index d16f9a0cbc02a..221e0a41f34eb 100644 --- a/onnxruntime/core/providers/cpu/tensor/image_scaler.h +++ b/onnxruntime/core/providers/cpu/tensor/image_scaler.h @@ -13,8 +13,8 @@ template class ImageScaler final : public OpKernel { public: ImageScaler(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); - 
ONNXRUNTIME_ENFORCE(info.GetAttrs("bias", bias_).IsOK()); + ORT_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); + ORT_ENFORCE(info.GetAttrs("bias", bias_).IsOK()); } Status Compute(OpKernelContext* context) const override { @@ -24,7 +24,7 @@ class ImageScaler final : public OpKernel { const auto dims = X->Shape().GetDims(); if (dims.size() < 4) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input is expected to have four dimensions corresponding to [N,C,H,W], got ", dims.size()); } @@ -34,7 +34,7 @@ class ImageScaler final : public OpKernel { const int64_t W = dims[3]; if (!bias_.empty() && bias_.size() != static_cast(C)) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Bias size (", bias_.size(), ") does not match the number of channels (", C, ")"); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Bias size (", bias_.size(), ") does not match the number of channels (", C, ")"); } Tensor* Y = context->Output(0, TensorShape({N, C, H, W})); diff --git a/onnxruntime/core/providers/cpu/tensor/mean_variance_normalization.h b/onnxruntime/core/providers/cpu/tensor/mean_variance_normalization.h index b02b2e30c9671..4448c672af018 100644 --- a/onnxruntime/core/providers/cpu/tensor/mean_variance_normalization.h +++ b/onnxruntime/core/providers/cpu/tensor/mean_variance_normalization.h @@ -14,8 +14,8 @@ class MeanVarianceNormalization_0 : public OpKernel { public: MeanVarianceNormalization_0(const OpKernelInfo& info, bool old_attr = true) : OpKernel(info) { if (old_attr) { - ONNXRUNTIME_ENFORCE(info.GetAttr("across_channels", &across_channels_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("normalize_variance", &normalize_variance_).IsOK()); + ORT_ENFORCE(info.GetAttr("across_channels", &across_channels_).IsOK()); + ORT_ENFORCE(info.GetAttr("normalize_variance", &normalize_variance_).IsOK()); } } diff --git a/onnxruntime/core/providers/cpu/tensor/pad.cc b/onnxruntime/core/providers/cpu/tensor/pad.cc index 3dbd4872a1acf..bac5f2655d393 100644 --- a/onnxruntime/core/providers/cpu/tensor/pad.cc +++ b/onnxruntime/core/providers/cpu/tensor/pad.cc @@ -53,8 +53,8 @@ Status Pad::Compute(OpKernelContext* ctx) const { std::vector output_dims(input_tensor.Shape().GetDims()); size_t dimension_count = output_dims.size(); - ONNXRUNTIME_ENFORCE(dimension_count > 0, "Input tensor has no dimensions"); - ONNXRUNTIME_ENFORCE(dimension_count * 2 == pads_.size(), "'pads' attribute has wrong number of values"); + ORT_ENFORCE(dimension_count > 0, "Input tensor has no dimensions"); + ORT_ENFORCE(dimension_count * 2 == pads_.size(), "'pads' attribute has wrong number of values"); std::vector input_starts; std::vector input_extents; diff --git a/onnxruntime/core/providers/cpu/tensor/pad.h b/onnxruntime/core/providers/cpu/tensor/pad.h index 50a02a4ca3edb..ebeca39f48813 100644 --- a/onnxruntime/core/providers/cpu/tensor/pad.h +++ b/onnxruntime/core/providers/cpu/tensor/pad.h @@ -18,10 +18,10 @@ class PadBase { else if (mode == "edge") mode_ = Mode::Edge; else - ONNXRUNTIME_THROW("Invalid 'mode' attribute value"); + ORT_THROW("Invalid 'mode' attribute value"); } if (!info.GetAttrs("pads", pads_).IsOK()) - ONNXRUNTIME_THROW("Invalid 'pads' attribute value"); + ORT_THROW("Invalid 'pads' attribute value"); // Separate out any negative pads_ into the slices_ array slices_.resize(pads_.size(), 0); diff --git a/onnxruntime/core/providers/cpu/tensor/reshape.h b/onnxruntime/core/providers/cpu/tensor/reshape.h index 
b8e52a2af2236..e5f5694826ac3 100644 --- a/onnxruntime/core/providers/cpu/tensor/reshape.h +++ b/onnxruntime/core/providers/cpu/tensor/reshape.h @@ -19,7 +19,7 @@ class Reshape final : public OpKernel { Status Compute(OpKernelContext* context) const override { // Copy the second input tensor into the shape vector const Tensor* shapeTensor = context->Input(1); - ONNXRUNTIME_ENFORCE(shapeTensor->Shape().NumDimensions() == 1, + ORT_ENFORCE(shapeTensor->Shape().NumDimensions() == 1, "A shape tensor must be a vector tensor."); size_t nDims = static_cast(shapeTensor->Shape()[0]); const int64_t* data = shapeTensor->template Data(); @@ -42,7 +42,7 @@ class Reshape_1 final : public OpKernel { public: Reshape_1(const OpKernelInfo& info) : OpKernel(info) { Status status = info.GetAttrs("shape", shape_); - ONNXRUNTIME_ENFORCE(status.IsOK(), "Attribute shape is not set."); + ORT_ENFORCE(status.IsOK(), "Attribute shape is not set."); } Status Compute(OpKernelContext* context) const override { diff --git a/onnxruntime/core/providers/cpu/tensor/reshape_helper.h b/onnxruntime/core/providers/cpu/tensor/reshape_helper.h index abb45e652c5fb..e5e5f0b9880c8 100644 --- a/onnxruntime/core/providers/cpu/tensor/reshape_helper.h +++ b/onnxruntime/core/providers/cpu/tensor/reshape_helper.h @@ -17,13 +17,13 @@ class ReshapeHelper { int64_t unknown_dim = -1; int64_t size = 1; for (size_t i = 0; i < nDims; ++i) { - ONNXRUNTIME_ENFORCE(requested_shape[i] >= -1, "A dimension cannot be less than -1."); + ORT_ENFORCE(requested_shape[i] >= -1, "A dimension cannot be less than -1."); if (requested_shape[i] == -1) { - ONNXRUNTIME_ENFORCE(unknown_dim == -1, "At most one dimension can be -1."); + ORT_ENFORCE(unknown_dim == -1, "At most one dimension can be -1."); unknown_dim = i; } else { if (requested_shape[i] == 0) { - ONNXRUNTIME_ENFORCE(i < input_shape.NumDimensions(), + ORT_ENFORCE(i < input_shape.NumDimensions(), "The dimension with value zero exceeds" " the dimension size of the input tensor."); requested_shape[i] = input_shape[i]; @@ -34,12 +34,12 @@ class ReshapeHelper { if (unknown_dim != -1) { // calculate unknown dimension - ONNXRUNTIME_ENFORCE((input_shape.Size() % size) == 0, + ORT_ENFORCE((input_shape.Size() % size) == 0, "The input tensor cannot be reshaped to the requested shape. Input shape:", input_shape); requested_shape[unknown_dim] = input_shape.Size() / size; } else { // check if the output shape is valid. - ONNXRUNTIME_ENFORCE(gsl::narrow_cast(input_shape.Size()) == size, + ORT_ENFORCE(gsl::narrow_cast(input_shape.Size()) == size, "The input tensor cannot be reshaped to the requested shape. 
Input shape:", input_shape); } } diff --git a/onnxruntime/core/providers/cpu/tensor/slice.cc b/onnxruntime/core/providers/cpu/tensor/slice.cc index f09e32fc3c393..23fae309648f6 100644 --- a/onnxruntime/core/providers/cpu/tensor/slice.cc +++ b/onnxruntime/core/providers/cpu/tensor/slice.cc @@ -79,7 +79,7 @@ Status SliceBase::PrepareForCompute(const size_t dimension_count, const std::vec template Status Slice::Compute(OpKernelContext* ctx) const { const Tensor* input_tensor_ptr = ctx->Input(0); - ONNXRUNTIME_ENFORCE(input_tensor_ptr != nullptr); + ORT_ENFORCE(input_tensor_ptr != nullptr); auto& input_tensor = *input_tensor_ptr; auto& input_dimensions = input_tensor.Shape().GetDims(); @@ -88,7 +88,7 @@ Status Slice::Compute(OpKernelContext* ctx) const { std::vector starts(dimension_count, 0); std::vector output_dims(input_dimensions); - ONNXRUNTIME_RETURN_IF_ERROR(PrepareForCompute(dimension_count, input_dimensions, starts, output_dims)); + ORT_RETURN_IF_ERROR(PrepareForCompute(dimension_count, input_dimensions, starts, output_dims)); TensorShape output_shape(output_dims); auto& output_tensor = *ctx->Output(0, output_shape); diff --git a/onnxruntime/core/providers/cpu/tensor/slice.h b/onnxruntime/core/providers/cpu/tensor/slice.h index 26adcd9fdeb46..2c71693e45865 100644 --- a/onnxruntime/core/providers/cpu/tensor/slice.h +++ b/onnxruntime/core/providers/cpu/tensor/slice.h @@ -12,14 +12,14 @@ class SliceBase { SliceBase(const OpKernelInfo& info) { has_axes_ = info.GetAttrs("axes", axes_).IsOK(); - ONNXRUNTIME_ENFORCE(info.GetAttrs("starts", starts_).IsOK(), "Invalid 'starts' attribute value"); - ONNXRUNTIME_ENFORCE(info.GetAttrs("ends", ends_).IsOK(), "Invalid 'ends' attribute value"); + ORT_ENFORCE(info.GetAttrs("starts", starts_).IsOK(), "Invalid 'starts' attribute value"); + ORT_ENFORCE(info.GetAttrs("ends", ends_).IsOK(), "Invalid 'ends' attribute value"); if (has_axes_) { if (axes_.size() > starts_.size()) - ONNXRUNTIME_THROW("'axes' has more entries than the 'starts' attribute holds"); + ORT_THROW("'axes' has more entries than the 'starts' attribute holds"); if (axes_.size() > ends_.size()) - ONNXRUNTIME_THROW("'axes' has more entries than the 'ends' attribute holds"); + ORT_THROW("'axes' has more entries than the 'ends' attribute holds"); } } diff --git a/onnxruntime/core/providers/cpu/tensor/space_depth_ops.cc b/onnxruntime/core/providers/cpu/tensor/space_depth_ops.cc index 1532cf1ee8bf1..05a57b74c416d 100644 --- a/onnxruntime/core/providers/cpu/tensor/space_depth_ops.cc +++ b/onnxruntime/core/providers/cpu/tensor/space_depth_ops.cc @@ -38,13 +38,13 @@ Status SpaceToDepth::Compute(OpKernelContext* context) const { const Tensor* tensor_pointer = context->Input(0); if (tensor_pointer == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch"); const Tensor& input = *tensor_pointer; - ONNXRUNTIME_ENFORCE(input.Shape().NumDimensions() == 4); + ORT_ENFORCE(input.Shape().NumDimensions() == 4); const int64_t batch = input.Shape().GetDims().at(0); const int64_t input_depth = input.Shape().GetDims().at(1); const int64_t input_height = input.Shape().GetDims().at(2); const int64_t input_width = input.Shape().GetDims().at(3); - ONNXRUNTIME_ENFORCE(input_height % this->blocksize_ == 0); - ONNXRUNTIME_ENFORCE(input_width % this->blocksize_ == 0); + ORT_ENFORCE(input_height % this->blocksize_ == 0); + ORT_ENFORCE(input_width % this->blocksize_ == 0); const int64_t output_depth = input_depth * blocksize_ * blocksize_; const int64_t output_height = input_height / blocksize_; 
@@ -67,13 +67,13 @@ Status DepthToSpace::Compute(OpKernelContext* context) const { const Tensor* tensor_pointer = context->Input(0); if (tensor_pointer == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch"); const Tensor& input = *tensor_pointer; - ONNXRUNTIME_ENFORCE(input.Shape().NumDimensions() == 4); + ORT_ENFORCE(input.Shape().NumDimensions() == 4); const int64_t batch = input.Shape().GetDims().at(0); const int64_t input_depth = input.Shape().GetDims().at(1); const int64_t input_height = input.Shape().GetDims().at(2); const int64_t input_width = input.Shape().GetDims().at(3); - ONNXRUNTIME_ENFORCE(input_depth % (blocksize_ * blocksize_) == 0); + ORT_ENFORCE(input_depth % (blocksize_ * blocksize_) == 0); const int64_t output_depth = input_depth / blocksize_ / blocksize_; const int64_t output_height = input_height * blocksize_; diff --git a/onnxruntime/core/providers/cpu/tensor/space_depth_ops.h b/onnxruntime/core/providers/cpu/tensor/space_depth_ops.h index fec8c0caae3e8..660d36e90dc16 100644 --- a/onnxruntime/core/providers/cpu/tensor/space_depth_ops.h +++ b/onnxruntime/core/providers/cpu/tensor/space_depth_ops.h @@ -10,7 +10,7 @@ namespace onnxruntime { class SpaceDepthBase : public OpKernel { public: SpaceDepthBase(const OpKernelInfo& info) : OpKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("blocksize", &blocksize_).IsOK(), + ORT_ENFORCE(info.GetAttr("blocksize", &blocksize_).IsOK(), "Attribute blocksize is not set."); } diff --git a/onnxruntime/core/providers/cpu/tensor/split.cc b/onnxruntime/core/providers/cpu/tensor/split.cc index afc31d6380aef..7c1ac3fb0b310 100644 --- a/onnxruntime/core/providers/cpu/tensor/split.cc +++ b/onnxruntime/core/providers/cpu/tensor/split.cc @@ -31,9 +31,9 @@ Status Split::Compute(OpKernelContext* context) const { else if (data_type == DataTypeImpl::GetType()) { /* Need to update CopyMatrix to support double... status = ComputeImpl(*context, input); */ - ONNXRUNTIME_NOT_IMPLEMENTED("Split operator does not support double yet"); + ORT_NOT_IMPLEMENTED("Split operator does not support double yet"); } else - ONNXRUNTIME_THROW("Invalid data type for Split operator of ", data_type); + ORT_THROW("Invalid data type for Split operator of ", data_type); return status; } @@ -61,7 +61,7 @@ Status Split::ComputeImpl(OpKernelContext& context, const Tensor& input) const { if (split_sizes_.empty()) { // equal split based on number of outputs if (split_dim_size % num_outputs != 0) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input cannot be split evenly on selected axis. Input shape=", input_shape, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input cannot be split evenly on selected axis. Input shape=", input_shape, " Axis=", axis_, " NumOutputs=", num_outputs); } @@ -69,7 +69,7 @@ Status Split::ComputeImpl(OpKernelContext& context, const Tensor& input) const { split_sizes = std::vector(num_outputs, split_dim_size / num_outputs); } else { if (split_sizes_.size() != num_outputs || split_size_sum_ != split_dim_size) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Cannot split using values in 'split' attribute. 
Axis=", axis_, " Input shape=", input_shape, " NumOutputs=", num_outputs, diff --git a/onnxruntime/core/providers/cpu/tensor/split.h b/onnxruntime/core/providers/cpu/tensor/split.h index 1d61d91483ba2..2fdcf1bc250ef 100644 --- a/onnxruntime/core/providers/cpu/tensor/split.h +++ b/onnxruntime/core/providers/cpu/tensor/split.h @@ -15,12 +15,12 @@ class Split final : public OpKernel { Split(const OpKernelInfo& info) : OpKernel(info) { // required with default of 0 if (!info.GetAttr("axis", &axis_).IsOK()) - ONNXRUNTIME_THROW("Missing 'axis' attribute value"); + ORT_THROW("Missing 'axis' attribute value"); // optional if (info.GetAttrs("split", split_sizes_).IsOK()) { split_size_sum_ = std::accumulate(split_sizes_.cbegin(), split_sizes_.cend(), 0LL); - ONNXRUNTIME_ENFORCE(std::all_of(split_sizes_.cbegin(), split_sizes_.cend(), [](int64_t value) { return value > 0; }), + ORT_ENFORCE(std::all_of(split_sizes_.cbegin(), split_sizes_.cend(), [](int64_t value) { return value > 0; }), "Invalid value in 'split' attribute. All values must be > 0"); } } diff --git a/onnxruntime/core/providers/cpu/tensor/squeeze.h b/onnxruntime/core/providers/cpu/tensor/squeeze.h index addd3f95221b8..a5de798274c4c 100644 --- a/onnxruntime/core/providers/cpu/tensor/squeeze.h +++ b/onnxruntime/core/providers/cpu/tensor/squeeze.h @@ -14,7 +14,7 @@ class SqueezeBase { SqueezeBase(const OpKernelInfo& info) { std::vector axes; Status status = info.GetAttrs("axes", axes); - ONNXRUNTIME_ENFORCE(status.IsOK(), "Attribute axes is not set."); + ORT_ENFORCE(status.IsOK(), "Attribute axes is not set."); // Handle out of order and repeating dims. std::sort(axes.begin(), axes.end()); @@ -29,7 +29,7 @@ class SqueezeBase { std::vector output_shape; for (size_t i = 0; i < input_shape.size(); ++i) { if (j < axes.size() && axes[j] == static_cast(i)) { - ONNXRUNTIME_ENFORCE(input_shape[i] == 1, "Dimension of input ", i, + ORT_ENFORCE(input_shape[i] == 1, "Dimension of input ", i, " must be 1 instead of ", input_shape[i]); ++j; continue; diff --git a/onnxruntime/core/providers/cpu/tensor/transpose.cc b/onnxruntime/core/providers/cpu/tensor/transpose.cc index d7828870c4cd0..cb797666a2099 100644 --- a/onnxruntime/core/providers/cpu/tensor/transpose.cc +++ b/onnxruntime/core/providers/cpu/tensor/transpose.cc @@ -89,7 +89,7 @@ template <> Status Transpose::Compute(OpKernelContext* ctx) const { // Get input and output: const Tensor* input_tensor_ptr = ctx->Input(0); - ONNXRUNTIME_ENFORCE(input_tensor_ptr != nullptr); + ORT_ENFORCE(input_tensor_ptr != nullptr); const Tensor& X = *input_tensor_ptr; const TensorShape& input_shape = X.Shape(); const std::vector& input_dims = input_shape.GetDims(); diff --git a/onnxruntime/core/providers/cpu/tensor/transpose.h b/onnxruntime/core/providers/cpu/tensor/transpose.h index 2f8657ff35081..4bee399087a3f 100644 --- a/onnxruntime/core/providers/cpu/tensor/transpose.h +++ b/onnxruntime/core/providers/cpu/tensor/transpose.h @@ -21,9 +21,9 @@ class TransposeBase { // Check that perm_ is a valid permutation of [0,rank-1] for (auto i : perm_) { if ((i < 0) || (i >= gsl::narrow(rank))) - ONNXRUNTIME_THROW("Attribute perm of Transpose has an invalid value. Value ", i, " is outside range."); + ORT_THROW("Attribute perm of Transpose has an invalid value. Value ", i, " is outside range."); if (seen[i]) - ONNXRUNTIME_THROW("Attribute perm of Transpose has an invalid value. Value ", i, " is repeated."); + ORT_THROW("Attribute perm of Transpose has an invalid value. 
Value ", i, " is repeated."); seen[i] = true; } } diff --git a/onnxruntime/core/providers/cpu/tensor/unsqueeze.cc b/onnxruntime/core/providers/cpu/tensor/unsqueeze.cc index 4a8f406226cf4..997210e49bd03 100644 --- a/onnxruntime/core/providers/cpu/tensor/unsqueeze.cc +++ b/onnxruntime/core/providers/cpu/tensor/unsqueeze.cc @@ -17,7 +17,7 @@ ONNX_CPU_OPERATOR_KERNEL( Status UnsqueezeBase::PrepareCompute(OpKernelContext* ctx, Prepare& p) const { const Tensor* X = ctx->Input(0); - ONNXRUNTIME_ENFORCE(X != nullptr); + ORT_ENFORCE(X != nullptr); auto& input_tensor = *X; // New dimension count is the current dimensions + the number of entries in axes_ @@ -51,7 +51,7 @@ Status UnsqueezeBase::PrepareCompute(OpKernelContext* ctx, Prepare& p) const { Status Unsqueeze::Compute(OpKernelContext* ctx) const { Prepare p; - ONNXRUNTIME_RETURN_IF_ERROR(PrepareCompute(ctx, p)); + ORT_RETURN_IF_ERROR(PrepareCompute(ctx, p)); CopyCpuTensor(p.input_tensor, p.output_tensor); diff --git a/onnxruntime/core/providers/cpu/tensor/unsqueeze.h b/onnxruntime/core/providers/cpu/tensor/unsqueeze.h index 3e4740bc8ad14..bbe42d98688f6 100644 --- a/onnxruntime/core/providers/cpu/tensor/unsqueeze.h +++ b/onnxruntime/core/providers/cpu/tensor/unsqueeze.h @@ -11,7 +11,7 @@ namespace onnxruntime { class UnsqueezeBase { protected: UnsqueezeBase(const OpKernelInfo& info) { - ONNXRUNTIME_ENFORCE(info.GetAttrs("axes", axes_).IsOK(), "Missing/Invalid 'axes' attribute value"); + ORT_ENFORCE(info.GetAttrs("axes", axes_).IsOK(), "Missing/Invalid 'axes' attribute value"); } struct Prepare { diff --git a/onnxruntime/core/providers/cpu/tensor/upsample.cc b/onnxruntime/core/providers/cpu/tensor/upsample.cc index daefce778890a..e5b6dc938f0e9 100644 --- a/onnxruntime/core/providers/cpu/tensor/upsample.cc +++ b/onnxruntime/core/providers/cpu/tensor/upsample.cc @@ -193,7 +193,7 @@ void upsampleBilinear( template Status Upsample::BaseCompute(OpKernelContext* context, const std::vector& scales) const { const Tensor* X = context->Input(0); - ONNXRUNTIME_ENFORCE(X != nullptr); + ORT_ENFORCE(X != nullptr); const std::vector& dims = X->Shape().GetDims(); if (dims.size() != scales.size()) { @@ -235,7 +235,7 @@ Status Upsample::Compute(OpKernelContext* context) const { } const Tensor* scales = context->Input(1); - ONNXRUNTIME_ENFORCE(scales != nullptr); + ORT_ENFORCE(scales != nullptr); int64_t scales_size = scales->Shape().Size(); std::vector scales_arrary(scales_size); ParseScalesData(scales, scales_arrary); diff --git a/onnxruntime/core/providers/cpu/tensor/upsample.h b/onnxruntime/core/providers/cpu/tensor/upsample.h index 7c2ff978784ff..690008456bef2 100644 --- a/onnxruntime/core/providers/cpu/tensor/upsample.h +++ b/onnxruntime/core/providers/cpu/tensor/upsample.h @@ -19,11 +19,11 @@ class UpsampleBase { protected: UpsampleBase(OpKernelInfo info) { std::string mode; - ONNXRUNTIME_ENFORCE(info.GetAttr("mode", &mode).IsOK()); + ORT_ENFORCE(info.GetAttr("mode", &mode).IsOK()); mode_ = StringToUpsampleMode(mode); if (info.GetInputCount() == 1) { - ONNXRUNTIME_ENFORCE(info.GetAttrs("scales", scales_).IsOK()); + ORT_ENFORCE(info.GetAttrs("scales", scales_).IsOK()); ScalesValidation(scales_, mode_); } } @@ -37,19 +37,19 @@ class UpsampleBase { } else if (strcmp(mode.c_str(), UpsampleModeLinear) == 0) { return UpsampleMode::LINEAR; } else { - ONNXRUNTIME_THROW("mode attribute is " + mode + ". It can only be " + + ORT_THROW("mode attribute is " + mode + ". 
It can only be " + UpsampleModeNN + "(default) or " + UpsampleModeLinear + "."); } } void ScalesValidation(const std::vector& scales, const UpsampleMode mode) const { for (auto& scale : scales) { - ONNXRUNTIME_ENFORCE(scale >= 1, "Scale value should be greater than or equal to 1."); + ORT_ENFORCE(scale >= 1, "Scale value should be greater than or equal to 1."); } if (UpsampleMode::LINEAR == mode) { - ONNXRUNTIME_ENFORCE(scales.size() == 4, "Upsample: linear mode upsample only support bilinear with 4 dimension."); - ONNXRUNTIME_ENFORCE(((scales[0] == 1) && (scales[1] == 1)), + ORT_ENFORCE(scales.size() == 4, "Upsample: linear mode upsample only support bilinear with 4 dimension."); + ORT_ENFORCE(((scales[0] == 1) && (scales[1] == 1)), "Upsample: linear mode upsample only support bilinear, the first 2 scales should be 1."); } } @@ -78,7 +78,7 @@ class Upsample : public UpsampleBase, public OpKernel { void ParseScalesData(const Tensor* scale, std::vector& scales) const { const float* scale_data = scale->template Data(); int64_t scales_size = scale->Shape().Size(); - ONNXRUNTIME_ENFORCE(scales_size > 0, "scales size should be greater than 0."); + ORT_ENFORCE(scales_size > 0, "scales size should be greater than 0."); memcpy(scales.data(), scale_data, scales_size * sizeof(float)); ScalesValidation(scales, mode_); } diff --git a/onnxruntime/core/providers/cpu/tensor/utils.h b/onnxruntime/core/providers/cpu/tensor/utils.h index ec2976914aeeb..4200595a7f99f 100644 --- a/onnxruntime/core/providers/cpu/tensor/utils.h +++ b/onnxruntime/core/providers/cpu/tensor/utils.h @@ -125,7 +125,7 @@ struct SliceSkips : std::vector { SliceSkips(const Tensor& tensor, gsl::span extents) : std::vector(tensor.Shape().NumDimensions(), 0) { auto& dims = tensor.Shape().GetDims(); - ONNXRUNTIME_ENFORCE(static_cast(dims.size()) == extents.size()); + ORT_ENFORCE(static_cast(dims.size()) == extents.size()); size_t pitch = dims.back(); back() = pitch - extents[size() - 1]; for (size_t i = size() - 1; i-- > 0;) { @@ -142,7 +142,7 @@ struct SliceIterator { SliceIterator(const Tensor& tensor, gsl::span starts, gsl::span extents) : tensor_(tensor), extents_(extents), skips_(tensor, extents), indices_(extents.size(), 0) { auto& dims = tensor_.Shape().GetDims(); - ONNXRUNTIME_ENFORCE(static_cast(dims.size()) == starts.size() && static_cast(dims.size()) == extents.size()); + ORT_ENFORCE(static_cast(dims.size()) == starts.size() && static_cast(dims.size()) == extents.size()); size_t pitch = 1; // Initial skip, so that input_ points to the first element to copy diff --git a/onnxruntime/core/providers/cuda/activation/activations.cc b/onnxruntime/core/providers/cuda/activation/activations.cc index cbdefe8e9905f..7a085156c7a60 100644 --- a/onnxruntime/core/providers/cuda/activation/activations.cc +++ b/onnxruntime/core/providers/cuda/activation/activations.cc @@ -25,7 +25,7 @@ namespace cuda { UnaryElementwise::Prepare(context, &p); \ CudaAsyncBuffer func_ctx(this, 0, MakeFuncCtx()); \ if (!std::is_same::value) \ - ONNXRUNTIME_RETURN_IF_ERROR(func_ctx.CopyToGpu()); \ + ORT_RETURN_IF_ERROR(func_ctx.CopyToGpu()); \ Impl_##x::MappedType>( \ reinterpret_cast::MappedType*>(p.input_tensor->template Data()), \ reinterpret_cast::MappedType*>(p.output_tensor->template MutableData()), \ diff --git a/onnxruntime/core/providers/cuda/activation/activations.h b/onnxruntime/core/providers/cuda/activation/activations.h index 55d1eac1ae5c3..e0580e3310da3 100644 --- a/onnxruntime/core/providers/cuda/activation/activations.h +++ 
b/onnxruntime/core/providers/cuda/activation/activations.h @@ -44,8 +44,8 @@ template class Affine final : public UnaryElementwise { public: Affine(const OpKernelInfo& info) : UnaryElementwise(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; @@ -61,7 +61,7 @@ template class Elu final : public UnaryElementwise { public: Elu(const OpKernelInfo& info) : UnaryElementwise(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; @@ -76,8 +76,8 @@ template class HardSigmoid final : public UnaryElementwise { public: HardSigmoid(const OpKernelInfo& info) : UnaryElementwise(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; @@ -93,7 +93,7 @@ template class LeakyRelu final : public UnaryElementwise { public: LeakyRelu(const OpKernelInfo& info) : UnaryElementwise(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; @@ -108,8 +108,8 @@ template class ParametricSoftplus final : public UnaryElementwise { public: ParametricSoftplus(const OpKernelInfo& info) : UnaryElementwise(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; @@ -136,8 +136,8 @@ template class ScaledTanh final : public UnaryElementwise { public: ScaledTanh(const OpKernelInfo& info) : UnaryElementwise(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; @@ -153,8 +153,8 @@ template class Selu final : public UnaryElementwise { public: Selu(const OpKernelInfo& info) : UnaryElementwise(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("gamma", &gamma_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("gamma", &gamma_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; @@ -214,7 +214,7 @@ template class ThresholdedRelu final : public UnaryElementwise { public: ThresholdedRelu(const OpKernelInfo& info) : UnaryElementwise(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; diff --git a/onnxruntime/core/providers/cuda/cuda_allocator.cc b/onnxruntime/core/providers/cuda/cuda_allocator.cc index 5b0ca35924502..54b88d0d65aff 100644 --- a/onnxruntime/core/providers/cuda/cuda_allocator.cc +++ 
b/onnxruntime/core/providers/cuda/cuda_allocator.cc @@ -20,7 +20,7 @@ void CUDAAllocator::CheckDevice() const { // if it's expected to change, call cudaSetDevice instead of the check int current_device; CUDA_CALL_THROW(cudaGetDevice(¤t_device)); - ONNXRUNTIME_ENFORCE(current_device == device_id_); + ORT_ENFORCE(current_device == device_id_); #endif } @@ -38,7 +38,7 @@ void CUDAAllocator::Free(void* p) { cudaFree(p); // do not throw error since it's OK for cudaFree to fail during shutdown } -const ONNXRuntimeAllocatorInfo& CUDAAllocator::Info() const { +const OrtAllocatorInfo& CUDAAllocator::Info() const { return info_; } @@ -58,8 +58,8 @@ void CUDAPinnedAllocator::Free(void* p) { CUDA_CALL_THROW(cudaFreeHost(p)); } -const ONNXRuntimeAllocatorInfo& CUDAPinnedAllocator::Info() const { - static constexpr ONNXRuntimeAllocatorInfo cuda_allocator_info(CUDA_PINNED, ONNXRuntimeDeviceAllocator, 0, ONNXRuntimeMemTypeCPUOutput); +const OrtAllocatorInfo& CUDAPinnedAllocator::Info() const { + static constexpr OrtAllocatorInfo cuda_allocator_info(CUDA_PINNED, OrtDeviceAllocator, 0, OrtMemTypeCPUOutput); return cuda_allocator_info; } diff --git a/onnxruntime/core/providers/cuda/cuda_allocator.h b/onnxruntime/core/providers/cuda/cuda_allocator.h index 0cbb4af7367a8..4637cbd7069ed 100644 --- a/onnxruntime/core/providers/cuda/cuda_allocator.h +++ b/onnxruntime/core/providers/cuda/cuda_allocator.h @@ -11,10 +11,10 @@ constexpr const char* CUDA_PINNED = "CudaPinned"; class CUDAAllocator : public IDeviceAllocator { public: - CUDAAllocator(int device_id) : device_id_(device_id), info_(CUDA, ONNXRuntimeAllocatorType::ONNXRuntimeDeviceAllocator, device_id, ONNXRuntimeMemTypeDefault) {} + CUDAAllocator(int device_id) : device_id_(device_id), info_(CUDA, OrtAllocatorType::OrtDeviceAllocator, device_id, OrtMemTypeDefault) {} virtual void* Alloc(size_t size) override; virtual void Free(void* p) override; - virtual const ONNXRuntimeAllocatorInfo& Info() const override; + virtual const OrtAllocatorInfo& Info() const override; virtual FencePtr CreateFence(const SessionState* session_state) override; private: @@ -22,7 +22,7 @@ class CUDAAllocator : public IDeviceAllocator { private: const int device_id_; - const ONNXRuntimeAllocatorInfo info_; + const OrtAllocatorInfo info_; }; //TODO: add a default constructor @@ -30,7 +30,7 @@ class CUDAPinnedAllocator : public IDeviceAllocator { public: virtual void* Alloc(size_t size) override; virtual void Free(void* p) override; - virtual const ONNXRuntimeAllocatorInfo& Info() const override; + virtual const OrtAllocatorInfo& Info() const override; virtual FencePtr CreateFence(const SessionState* session_state) override; }; diff --git a/onnxruntime/core/providers/cuda/cuda_call.cc b/onnxruntime/core/providers/cuda/cuda_call.cc index d5a4b03e384d9..b0f2411ef1c17 100644 --- a/onnxruntime/core/providers/cuda/cuda_call.cc +++ b/onnxruntime/core/providers/cuda/cuda_call.cc @@ -18,7 +18,7 @@ using namespace common; template const char* CudaErrString(ERRTYPE x) { - ONNXRUNTIME_NOT_IMPLEMENTED(); + ORT_NOT_IMPLEMENTED(); } #define CASE_ENUM_TO_STR(x) \ @@ -88,13 +88,13 @@ bool CudaCall(ERRTYPE retCode, const char* exprString, const char* libName, ERRT hostname, exprString, msg); if (THRW) { - ONNXRUNTIME_THROW(str); + ORT_THROW(str); } else { LOGS_DEFAULT(ERROR) << str; } } catch (const std::exception& e) { // catch, log, and rethrow since CUDA code sometimes hangs in destruction, so we'd never get to see the error if (THRW) { - ONNXRUNTIME_THROW(e.what()); + ORT_THROW(e.what()); } else { 
LOGS_DEFAULT(ERROR) << e.what(); } diff --git a/onnxruntime/core/providers/cuda/cuda_common.h b/onnxruntime/core/providers/cuda/cuda_common.h index e8b440058413c..20c3fc1afae2f 100644 --- a/onnxruntime/core/providers/cuda/cuda_common.h +++ b/onnxruntime/core/providers/cuda/cuda_common.h @@ -15,12 +15,12 @@ namespace onnxruntime { namespace cuda { -#define CUDA_RETURN_IF_ERROR(expr) ONNXRUNTIME_RETURN_IF_ERROR(CUDA_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) -#define CUBLAS_RETURN_IF_ERROR(expr) ONNXRUNTIME_RETURN_IF_ERROR(CUBLAS_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) -#define CUSPARSE_RETURN_IF_ERROR(expr) ONNXRUNTIME_RETURN_IF_ERROR(CUSPARSE_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) -#define CURAND_RETURN_IF_ERROR(expr) ONNXRUNTIME_RETURN_IF_ERROR(CURAND_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) -#define CUDNN_RETURN_IF_ERROR(expr) ONNXRUNTIME_RETURN_IF_ERROR(CUDNN_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) -#define CUDNN2_RETURN_IF_ERROR(expr, m) ONNXRUNTIME_RETURN_IF_ERROR(CUDNN_CALL2(expr, m) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) +#define CUDA_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(CUDA_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) +#define CUBLAS_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(CUBLAS_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) +#define CUSPARSE_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(CUSPARSE_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) +#define CURAND_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(CURAND_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) +#define CUDNN_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(CUDNN_CALL(expr) ? common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) +#define CUDNN2_RETURN_IF_ERROR(expr, m) ORT_RETURN_IF_ERROR(CUDNN_CALL2(expr, m) ? 
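The renamed *_RETURN_IF_ERROR macros above all follow the same early-return pattern: evaluate an expression that yields a status and bail out of the enclosing function if it is not OK. Below is a simplified, self-contained analogue with a toy Status type and a hypothetical driver call; the real macros operate on onnxruntime common::Status and the CUDA/cuDNN call wrappers.

struct Status {
  bool ok;
  static Status OK() { return {true}; }
  static Status Fail() { return {false}; }
  bool IsOK() const { return ok; }
};

#define RETURN_IF_ERROR(expr)              \
  do {                                     \
    Status _status = (expr);               \
    if (!_status.IsOK()) return _status;   \
  } while (0)

// Hypothetical library call that reports failure with a non-zero code.
int FakeDriverCall() { return 0; }

Status DoWork() {
  // Mirrors how CUDA_RETURN_IF_ERROR maps a raw call result onto a Status.
  RETURN_IF_ERROR(FakeDriverCall() == 0 ? Status::OK() : Status::Fail());
  return Status::OK();
}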
common::Status::OK() : common::Status(common::ONNXRUNTIME, common::FAIL)) // ----------------------------------------------------------------------- // Base class for CUDA kernels @@ -45,7 +45,7 @@ class CudaKernel : public OpKernel { template inline IAllocatorUniquePtr AllocateBufferOnCPUPinned(int id, size_t count_or_bytes) const { - AllocatorPtr allocator = provider_->GetAllocator(id, ONNXRuntimeMemTypeCPU); + AllocatorPtr allocator = provider_->GetAllocator(id, OrtMemTypeCPU); if (!allocator) return nullptr; return IAllocator::MakeUniquePtr(allocator, count_or_bytes); diff --git a/onnxruntime/core/providers/cuda/cuda_execution_provider.cc b/onnxruntime/core/providers/cuda/cuda_execution_provider.cc index a7982354732f7..40a60c0909d5e 100644 --- a/onnxruntime/core/providers/cuda/cuda_execution_provider.cc +++ b/onnxruntime/core/providers/cuda/cuda_execution_provider.cc @@ -22,7 +22,7 @@ ONNX_OPERATOR_KERNEL_EX( 1, kCudaExecutionProvider, KernelDefBuilder() - .InputMemoryType(0) + .InputMemoryType(0) .ExecQueueId(kCudaStreamCopyIn) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Memcpy); @@ -33,7 +33,7 @@ ONNX_OPERATOR_KERNEL_EX( 1, kCudaExecutionProvider, KernelDefBuilder() - .OutputMemoryType(0) + .OutputMemoryType(0) .ExecQueueId(kCudaStreamCopyOut) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Memcpy); @@ -63,16 +63,16 @@ CUDAExecutionProvider::CUDAExecutionProvider(const CUDAExecutionProviderInfo& in CUDA_CALL_THROW(cudaStreamCreateWithFlags(&streams_[kCudaStreamCopyOut], cudaStreamNonBlocking)); DeviceAllocatorRegistrationInfo default_allocator_info( - {ONNXRuntimeMemTypeDefault, [](int id) { return std::make_unique(id); }, std::numeric_limits::max()}); + {OrtMemTypeDefault, [](int id) { return std::make_unique(id); }, std::numeric_limits::max()}); InsertAllocator(CreateAllocator(default_allocator_info, device_id_)); DeviceAllocatorRegistrationInfo pinned_allocator_info( - {ONNXRuntimeMemTypeCPUOutput, [](int) { return std::make_unique(); }, std::numeric_limits::max()}); + {OrtMemTypeCPUOutput, [](int) { return std::make_unique(); }, std::numeric_limits::max()}); InsertAllocator(CreateAllocator(pinned_allocator_info, device_id_)); } CUDAExecutionProvider::~CUDAExecutionProvider() { - auto cpu_alloc = GetAllocator(0, ONNXRuntimeMemTypeCPU); + auto cpu_alloc = GetAllocator(0, OrtMemTypeCPU); std::lock_guard lock(deferred_release_cpu_ptr_mutex_); auto it = deferred_release_cpu_ptr_.begin(); while (it != deferred_release_cpu_ptr_.end()) { @@ -105,16 +105,16 @@ void CUDAExecutionProvider::ReleasePerThreadStuffs() const { } } -AllocatorPtr CUDAExecutionProvider::GetAllocator(int id, ONNXRuntimeMemType mem_type) const { +AllocatorPtr CUDAExecutionProvider::GetAllocator(int id, OrtMemType mem_type) const { // Pinned memory allocator is shared between threads, but CUDA memory allocator is per-thread or it may cause result changes // A hypothesis is that arena allocator is not aligned with CUDA output cache, and data from different kernel writes may // cause cacheline to contain dirty data. 
- if (mem_type == ONNXRuntimeMemTypeDefault) { + if (mem_type == OrtMemTypeDefault) { if (!per_thread_default_allocator_) { std::lock_guard lock(default_allocator_pool_mutex_); if (default_allocator_pool_.empty()) { DeviceAllocatorRegistrationInfo default_allocator_info( - {ONNXRuntimeMemTypeDefault, + {OrtMemTypeDefault, [](int id) { return std::make_unique(id); }, std::numeric_limits::max()}); per_thread_default_allocator_ = CreateAllocator(default_allocator_info, device_id_); } else { @@ -141,13 +141,13 @@ void CUDAExecutionProvider::AddDeferredReleaseCPUPtr(void* p) { if (current_deferred_release_event) { std::lock_guard lock(deferred_release_cpu_ptr_mutex_); auto iter = deferred_release_cpu_ptr_.find(current_deferred_release_event); - ONNXRUNTIME_ENFORCE(iter != deferred_release_cpu_ptr_.end()); + ORT_ENFORCE(iter != deferred_release_cpu_ptr_.end()); iter->second.cpu_ptrs.push_back(p); } } Status CUDAExecutionProvider::OnRunStart() { - auto cpu_alloc = GetAllocator(0, ONNXRuntimeMemTypeCPU); + auto cpu_alloc = GetAllocator(0, OrtMemTypeCPU); // check if cudaEvents has passed for deferred release // note that we need to take a mutex in case of multi-threaded Run() std::lock_guard lock(deferred_release_cpu_ptr_mutex_); @@ -184,7 +184,7 @@ Status CUDAExecutionProvider::OnRunStart() { } Status CUDAExecutionProvider::OnRunEnd() { - ONNXRUNTIME_RETURN_IF_NOT(per_thread_context_ != nullptr); + ORT_RETURN_IF_NOT(per_thread_context_ != nullptr); // record deferred release event on default stream, and release per_thread_context auto current_deferred_release_event = per_thread_context_->GetCurrentDeferredReleaseEvent(); CUDA_RETURN_IF_ERROR(cudaEventRecord(current_deferred_release_event, nullptr)); @@ -205,7 +205,7 @@ Status CUDAExecutionProvider::CopyTensor(const Tensor& src, Tensor& dst, int exe if (strcmp(src.Location().name, CUDA) != 0 && strcmp(src.Location().name, CUDA_PINNED) != 0 && strcmp(dst.Location().name, CUDA) != 0 && strcmp(dst.Location().name, CUDA_PINNED) != 0) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Unsupported tensor location: src_location is: ", src.Location().name, " and dst_location is: ", dst.Location().name); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Unsupported tensor location: src_location is: ", src.Location().name, " and dst_location is: ", dst.Location().name); } size_t bytes = src.DataType()->Size() * src.Shape().Size(); diff --git a/onnxruntime/core/providers/cuda/cuda_execution_provider.h b/onnxruntime/core/providers/cuda/cuda_execution_provider.h index 30adef2e33650..9d2af6b416a73 100644 --- a/onnxruntime/core/providers/cuda/cuda_execution_provider.h +++ b/onnxruntime/core/providers/cuda/cuda_execution_provider.h @@ -29,7 +29,7 @@ class CUDAExecutionProvider : public IExecutionProvider { explicit CUDAExecutionProvider(const CUDAExecutionProviderInfo& info); virtual ~CUDAExecutionProvider(); - AllocatorPtr GetAllocator(int id, ONNXRuntimeMemType mem_type = ONNXRuntimeMemTypeDefault) const override; + AllocatorPtr GetAllocator(int id, OrtMemType mem_type = OrtMemTypeDefault) const override; std::string Type() const override { return onnxruntime::kCudaExecutionProvider; @@ -66,7 +66,7 @@ class CUDAExecutionProvider : public IExecutionProvider { } cudaStream_t GetStream(int queue_id) const { - ONNXRUNTIME_ENFORCE(queue_id >= 0 && queue_id < kTotalCudaStreams); + ORT_ENFORCE(queue_id >= 0 && queue_id < kTotalCudaStreams); return streams_[queue_id]; } @@ -85,7 +85,7 @@ class CUDAExecutionProvider : public IExecutionProvider { if (count_or_bytes == 0) 
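The deferred-release bookkeeping in CUDAExecutionProvider above keeps CPU-side staging buffers alive until a CUDA event, recorded after the work that reads them, has completed. A rough standalone sketch of that idea follows (hypothetical helper; the provider frees through its CPU allocator and tracks events per Run, not exactly as shown here):

#include <cuda_runtime.h>
#include <unordered_map>
#include <vector>

class DeferredCpuRelease {
 public:
  // Associate a host pointer with the event that guards the work currently queued.
  void Defer(cudaEvent_t ev, void* p) { pending_[ev].push_back(p); }

  // Called at the start of the next run: free everything whose event has passed.
  void Collect() {
    for (auto it = pending_.begin(); it != pending_.end();) {
      if (cudaEventQuery(it->first) == cudaSuccess) {  // GPU work using these buffers is done
        for (void* p : it->second) cudaFreeHost(p);    // assumes pinned host allocations
        cudaEventDestroy(it->first);
        it = pending_.erase(it);
      } else {
        ++it;
      }
    }
  }

 private:
  std::unordered_map<cudaEvent_t, std::vector<void*>> pending_;
};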
return nullptr; - return IAllocator::MakeUniquePtr(GetAllocator(ONNXRuntimeMemTypeDefault), count_or_bytes); + return IAllocator::MakeUniquePtr(GetAllocator(OrtMemTypeDefault), count_or_bytes); } virtual std::shared_ptr GetKernelRegistry() const override; diff --git a/onnxruntime/core/providers/cuda/cuda_provider_factory.cc b/onnxruntime/core/providers/cuda/cuda_provider_factory.cc index e25635b337dbe..c9030b27c64ff 100644 --- a/onnxruntime/core/providers/cuda/cuda_provider_factory.cc +++ b/onnxruntime/core/providers/cuda/cuda_provider_factory.cc @@ -9,35 +9,35 @@ using namespace onnxruntime; namespace { struct CUDAProviderFactory { - const ONNXRuntimeProviderFactoryInterface* const cls; + const OrtProviderFactoryInterface* const cls; std::atomic_int ref_count; int device_id; CUDAProviderFactory(); }; -ONNXStatus* ONNXRUNTIME_API_CALL CreateCuda(void* this_, ONNXRuntimeProvider** out) { +ONNXStatus* ORT_API_CALL CreateCuda(void* this_, OrtProvider** out) { CUDAExecutionProviderInfo info; CUDAProviderFactory* this_ptr = (CUDAProviderFactory*)this_; info.device_id = this_ptr->device_id; CUDAExecutionProvider* ret = new CUDAExecutionProvider(info); - *out = (ONNXRuntimeProvider*)ret; + *out = (OrtProvider*)ret; return nullptr; } -uint32_t ONNXRUNTIME_API_CALL ReleaseCuda(void* this_) { +uint32_t ORT_API_CALL ReleaseCuda(void* this_) { CUDAProviderFactory* this_ptr = (CUDAProviderFactory*)this_; if (--this_ptr->ref_count == 0) delete this_ptr; return 0; } -uint32_t ONNXRUNTIME_API_CALL AddRefCuda(void* this_) { +uint32_t ORT_API_CALL AddRefCuda(void* this_) { CUDAProviderFactory* this_ptr = (CUDAProviderFactory*)this_; ++this_ptr->ref_count; return 0; } -constexpr ONNXRuntimeProviderFactoryInterface cuda_cls = { +constexpr OrtProviderFactoryInterface cuda_cls = { AddRefCuda, ReleaseCuda, CreateCuda, @@ -46,9 +46,9 @@ constexpr ONNXRuntimeProviderFactoryInterface cuda_cls = { CUDAProviderFactory::CUDAProviderFactory() : cls(&cuda_cls), ref_count(1), device_id(0) {} } // namespace -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateCUDAExecutionProviderFactory, int device_id, _Out_ ONNXRuntimeProviderFactoryInterface*** out) { +ORT_API_STATUS_IMPL(OrtCreateCUDAExecutionProviderFactory, int device_id, _Out_ OrtProviderFactoryInterface*** out) { CUDAProviderFactory* ret = new CUDAProviderFactory(); ret->device_id = device_id; - *out = (ONNXRuntimeProviderFactoryInterface**)ret; + *out = (OrtProviderFactoryInterface**)ret; return nullptr; } diff --git a/onnxruntime/core/providers/cuda/cudnn_common.cc b/onnxruntime/core/providers/cuda/cudnn_common.cc index 0d09fcdb328b2..ff3a09e73af46 100644 --- a/onnxruntime/core/providers/cuda/cudnn_common.cc +++ b/onnxruntime/core/providers/cuda/cudnn_common.cc @@ -27,7 +27,7 @@ Status CudnnTensor::CreateTensorIfNeeded() { } Status CudnnTensor::Set(const std::vector& input_dims, cudnnDataType_t dataType) { - ONNXRUNTIME_RETURN_IF_ERROR(CreateTensorIfNeeded()); + ORT_RETURN_IF_ERROR(CreateTensorIfNeeded()); int rank = gsl::narrow_cast(input_dims.size()); TensorPitches pitches(input_dims); @@ -42,7 +42,7 @@ Status CudnnTensor::Set(const std::vector& input_dims, cudnnDataType_t } Status CudnnTensor::Set(const CudnnTensor& x_desc, cudnnBatchNormMode_t mode) { - ONNXRUNTIME_RETURN_IF_ERROR(CreateTensorIfNeeded()); + ORT_RETURN_IF_ERROR(CreateTensorIfNeeded()); CUDNN_RETURN_IF_ERROR(cudnnDeriveBNTensorDescriptor(tensor_, x_desc, mode)); return Status::OK(); } @@ -56,7 +56,7 @@ cudnnDataType_t CudnnTensor::GetDataType() { else if (typeid(ElemType) == typeid(half)) return 
CUDNN_DATA_HALF; else - ONNXRUNTIME_THROW("cuDNN engine currently supports only single/double/half precision data types."); + ORT_THROW("cuDNN engine currently supports only single/double/half precision data types."); } CudnnFilterDescriptor::CudnnFilterDescriptor() : desc_(nullptr) { diff --git a/onnxruntime/core/providers/cuda/math/binary_elementwise_ops.cc b/onnxruntime/core/providers/cuda/math/binary_elementwise_ops.cc index 0cd19bb4ac3ca..cbcf8e25a1f46 100644 --- a/onnxruntime/core/providers/cuda/math/binary_elementwise_ops.cc +++ b/onnxruntime/core/providers/cuda/math/binary_elementwise_ops.cc @@ -12,7 +12,7 @@ Status BinaryElementwise::Prepare(OpKernelContext* context, p->lhs_tensor = context->Input(0); p->rhs_tensor = context->Input(1); if (!(p->lhs_tensor->Shape() == p->rhs_tensor->Shape())) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, Node().Name(), ": mismatching input shapes: ", p->lhs_tensor->Shape().ToString(), " != ", p->rhs_tensor->Shape().ToString()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, Node().Name(), ": mismatching input shapes: ", p->lhs_tensor->Shape().ToString(), " != ", p->rhs_tensor->Shape().ToString()); p->output_tensor = context->Output(0, p->lhs_tensor->Shape()); p->output_rank_or_simple_broadcast = static_cast(SimpleBroadcast::NoBroadcast); return Status::OK(); @@ -33,10 +33,10 @@ static Status ComputeOutputShape(const std::string& node_name, const TensorShape rhs_dim = rhs_shape[rhs_rank - 1 - i]; int64_t out_dim = std::max(lhs_dim, rhs_dim); if (lhs_dim != out_dim && lhs_dim != 1) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, node_name, ": left operand cannot broadcast on dim ", lhs_rank - 1 - i, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, node_name, ": left operand cannot broadcast on dim ", lhs_rank - 1 - i, " LeftShape: ", lhs_shape.ToString(), ", RightShape: ", rhs_shape.ToString()); if (rhs_dim != out_dim && rhs_dim != 1) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, node_name, ": right operand cannot broadcast on dim ", rhs_rank - 1 - i, + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, node_name, ": right operand cannot broadcast on dim ", rhs_rank - 1 - i, " LeftShape: ", lhs_shape.ToString(), ", RightShape: ", rhs_shape.ToString()); output_dims[out_rank - 1 - i] = out_dim; } @@ -59,7 +59,7 @@ Status BinaryElementwiseBroadcastPrepare( p->output_tensor = output_tensor; const auto& output_shape = output_tensor->Shape(); - ONNXRUNTIME_RETURN_IF_ERROR(p->BinaryElementwiseBroadcastPrepareHelper(device_id, lhs_shape, rhs_shape, output_shape)); + ORT_RETURN_IF_ERROR(p->BinaryElementwiseBroadcastPrepareHelper(device_id, lhs_shape, rhs_shape, output_shape)); return Status::OK(); } @@ -72,10 +72,10 @@ Status BinaryElementwise::Prepare(OpKernelContext* context, int const auto& rhs_shape = rhs_tensor->Shape(); TensorShape output_shape; - ONNXRUNTIME_RETURN_IF_ERROR(ComputeOutputShape(Node().Name(), lhs_shape, rhs_shape, output_shape)); + ORT_RETURN_IF_ERROR(ComputeOutputShape(Node().Name(), lhs_shape, rhs_shape, output_shape)); auto output_tensor = context->Output(0, output_shape); - ONNXRUNTIME_RETURN_IF_ERROR(BinaryElementwiseBroadcastPrepare(device_id, lhs_tensor, rhs_tensor, output_tensor, p)); + ORT_RETURN_IF_ERROR(BinaryElementwiseBroadcastPrepare(device_id, lhs_tensor, rhs_tensor, output_tensor, p)); return Status::OK(); } @@ -106,7 +106,7 @@ Status BinaryElementwise::Prepare(OpKernelContext* context, int Status x::ComputeInternal(OpKernelContext* context) const { \ BinaryElementwisePreparation prepare(this); \ Prepare(context, 0, 
&prepare); \ - ONNXRUNTIME_RETURN_IF_ERROR(prepare.CopyToGpu()); \ + ORT_RETURN_IF_ERROR(prepare.CopyToGpu()); \ Impl_##x::MappedType>( \ prepare.output_rank_or_simple_broadcast, \ prepare.lhs_padded_strides.GpuPtr(), \ @@ -192,7 +192,7 @@ Status Sum::ComputeInternal(OpKernelContext* context) const { const auto& node = Node(); const auto& node_name = node.Name(); auto input_count = node.InputArgCount().front(); - ONNXRUNTIME_RETURN_IF_NOT(input_count >= 1, "Must have 1 or more inputs"); + ORT_RETURN_IF_NOT(input_count >= 1, "Must have 1 or more inputs"); if (input_count == 1) { auto input_tensor = context->Input(0); @@ -202,16 +202,16 @@ Status Sum::ComputeInternal(OpKernelContext* context) const { } else { // compute output shape first, using broadcast rule TensorShape output_shape; - ONNXRUNTIME_RETURN_IF_ERROR(ComputeOutputShape(node_name, context->Input(0)->Shape(), context->Input(1)->Shape(), output_shape)); + ORT_RETURN_IF_ERROR(ComputeOutputShape(node_name, context->Input(0)->Shape(), context->Input(1)->Shape(), output_shape)); for (int index = 2; index < input_count; index++) { TensorShape previous_output_shape = output_shape; - ONNXRUNTIME_RETURN_IF_ERROR(ComputeOutputShape(node_name, previous_output_shape, context->Input(index)->Shape(), output_shape)); + ORT_RETURN_IF_ERROR(ComputeOutputShape(node_name, previous_output_shape, context->Input(index)->Shape(), output_shape)); } Tensor* output_tensor = context->Output(0, output_shape); BinaryElementwisePreparation prepare(this); if (input_count == 2) { // special case for 2 tensors to avoid memset zero - ONNXRUNTIME_RETURN_IF_ERROR(BinaryElementwiseBroadcastPrepare(0, context->Input(0), context->Input(1), output_tensor, &prepare)); + ORT_RETURN_IF_ERROR(BinaryElementwiseBroadcastPrepare(0, context->Input(0), context->Input(1), output_tensor, &prepare)); Impl_Add( prepare.output_rank_or_simple_broadcast, prepare.lhs_padded_strides.GpuPtr(), @@ -227,7 +227,7 @@ Status Sum::ComputeInternal(OpKernelContext* context) const { // for more than 2 inputs, we need to accumulate into output tensor, as the shape from input0 + input1 might be different from output shape CUDA_RETURN_IF_ERROR(cudaMemset(output_tensor->MutableDataRaw(), 0, output_shape.Size() * sizeof(CudaT))); for (int index = 0; index < input_count; index++) { - ONNXRUNTIME_RETURN_IF_ERROR(BinaryElementwiseBroadcastPrepare(0, output_tensor, context->Input(index), output_tensor, &prepare)); + ORT_RETURN_IF_ERROR(BinaryElementwiseBroadcastPrepare(0, output_tensor, context->Input(index), output_tensor, &prepare)); Impl_Add( prepare.output_rank_or_simple_broadcast, prepare.lhs_padded_strides.GpuPtr(), diff --git a/onnxruntime/core/providers/cuda/math/binary_elementwise_ops.h b/onnxruntime/core/providers/cuda/math/binary_elementwise_ops.h index a8517d9ee83d6..347e1261b713e 100644 --- a/onnxruntime/core/providers/cuda/math/binary_elementwise_ops.h +++ b/onnxruntime/core/providers/cuda/math/binary_elementwise_ops.h @@ -28,9 +28,9 @@ struct BinaryElementwisePreparation { fdm_output_strides(op_kernel) {} Status CopyToGpu() { - ONNXRUNTIME_RETURN_IF_ERROR(lhs_padded_strides.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(rhs_padded_strides.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(fdm_output_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(lhs_padded_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(rhs_padded_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(fdm_output_strides.CopyToGpu()); return Status::OK(); } @@ -83,20 +83,20 @@ struct BinaryElementwisePreparation { // compute strides with 1 more dim 
than out_rank, and use strides[0] == strides[1] // to decide if dim0 needs broadcast lhs_padded_strides.AllocCpuPtr(device_id, out_rank + 1); - ONNXRUNTIME_RETURN_IF_NOT(TensorPitches::Calculate(lhs_padded_strides.CpuSpan(), lhs_shape.GetDims())); + ORT_RETURN_IF_NOT(TensorPitches::Calculate(lhs_padded_strides.CpuSpan(), lhs_shape.GetDims())); if (lhs_shape[0] > 1 && lhs_rank == out_rank) lhs_padded_strides.CpuPtr()[0] = 0; } if (rhs_shape != output_shape) { rhs_padded_strides.AllocCpuPtr(device_id, out_rank + 1); - ONNXRUNTIME_RETURN_IF_NOT(TensorPitches::Calculate(rhs_padded_strides.CpuSpan(), rhs_shape.GetDims())); + ORT_RETURN_IF_NOT(TensorPitches::Calculate(rhs_padded_strides.CpuSpan(), rhs_shape.GetDims())); if (rhs_shape[0] > 1 && rhs_rank == out_rank) rhs_padded_strides.CpuPtr()[0] = 0; } fdm_output_strides.AllocCpuPtr(device_id, out_rank); - ONNXRUNTIME_RETURN_IF_NOT(CalculateFdmStrides(fdm_output_strides.CpuSpan(), output_shape.GetDims())); + ORT_RETURN_IF_NOT(CalculateFdmStrides(fdm_output_strides.CpuSpan(), output_shape.GetDims())); return Status::OK(); } }; diff --git a/onnxruntime/core/providers/cuda/math/gemm.h b/onnxruntime/core/providers/cuda/math/gemm.h index 8c9895f1c42af..0ddc85b643a99 100644 --- a/onnxruntime/core/providers/cuda/math/gemm.h +++ b/onnxruntime/core/providers/cuda/math/gemm.h @@ -14,14 +14,14 @@ class Gemm final : public CudaKernel { public: Gemm(const OpKernelInfo& info) : CudaKernel(info) { int64_t temp; - ONNXRUNTIME_ENFORCE(info.GetAttr("transA", &temp).IsOK()); + ORT_ENFORCE(info.GetAttr("transA", &temp).IsOK()); trans_A_ = (temp != 0); - ONNXRUNTIME_ENFORCE(info.GetAttr("transB", &temp).IsOK()); + ORT_ENFORCE(info.GetAttr("transB", &temp).IsOK()); trans_B_ = (temp != 0); - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; diff --git a/onnxruntime/core/providers/cuda/math/matmul.cc b/onnxruntime/core/providers/cuda/math/matmul.cc index 7372ccd2b3380..1b91d5564e8d9 100644 --- a/onnxruntime/core/providers/cuda/math/matmul.cc +++ b/onnxruntime/core/providers/cuda/math/matmul.cc @@ -32,10 +32,10 @@ Status MatMul::ComputeInternal(OpKernelContext* ctx) const { const Tensor* right_X = ctx->Input(1); MatMulComputeHelper helper; - ONNXRUNTIME_RETURN_IF_ERROR(helper.Compute(left_X->Shape(), right_X->Shape())); + ORT_RETURN_IF_ERROR(helper.Compute(left_X->Shape(), right_X->Shape())); Tensor* Y = ctx->Output(0, helper.OutputShape()); - ONNXRUNTIME_RETURN_IF_NOT(strcmp(Y->Location().name, CUDA) == 0, "Output should be allocated on CUDA"); + ORT_RETURN_IF_NOT(strcmp(Y->Location().name, CUDA) == 0, "Output should be allocated on CUDA"); CudaT one = ToCudaType::FromFloat(1.0f); CudaT zero = ToCudaType::FromFloat(0.0f); @@ -65,9 +65,9 @@ Status MatMul::ComputeInternal(OpKernelContext* ctx) const { MatMulComputeHelper::OffsetToArrays(reinterpret_cast(left_X->template Data()), helper.LeftOffsets(), left_arrays.CpuSpan()); MatMulComputeHelper::OffsetToArrays(reinterpret_cast(right_X->template Data()), helper.RightOffsets(), right_arrays.CpuSpan()); MatMulComputeHelper::OffsetToArrays(reinterpret_cast(Y->template MutableData()), helper.OutputOffsets(), output_arrays.CpuSpan()); - ONNXRUNTIME_RETURN_IF_ERROR(left_arrays.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(right_arrays.CopyToGpu()); - 
ONNXRUNTIME_RETURN_IF_ERROR(output_arrays.CopyToGpu()); + ORT_RETURN_IF_ERROR(left_arrays.CopyToGpu()); + ORT_RETURN_IF_ERROR(right_arrays.CopyToGpu()); + ORT_RETURN_IF_ERROR(output_arrays.CopyToGpu()); // note that onnxruntime MLValue is row major, while cublas is column major, // so swap left/right operands diff --git a/onnxruntime/core/providers/cuda/math/softmax.cc b/onnxruntime/core/providers/cuda/math/softmax.cc index b444e2a25bba0..47ba79acd5f86 100644 --- a/onnxruntime/core/providers/cuda/math/softmax.cc +++ b/onnxruntime/core/providers/cuda/math/softmax.cc @@ -39,8 +39,8 @@ Status Softmax::ComputeInternal(OpKernelContext* ctx) const { const auto beta = Consts::Zero; CudnnTensor input_tensor; CudnnTensor output_tensor; - ONNXRUNTIME_RETURN_IF_ERROR(input_tensor.Set(dims, CudnnTensor::GetDataType())); - ONNXRUNTIME_RETURN_IF_ERROR(output_tensor.Set(dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(input_tensor.Set(dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(output_tensor.Set(dims, CudnnTensor::GetDataType())); CUDNN_RETURN_IF_ERROR(cudnnSoftmaxForward(CudnnHandle(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, input_tensor, x_data, &beta, output_tensor, y_data)); return Status::OK(); diff --git a/onnxruntime/core/providers/cuda/nn/batch_norm.cc b/onnxruntime/core/providers/cuda/nn/batch_norm.cc index 0cc7623aafb03..9a551739c1cf4 100644 --- a/onnxruntime/core/providers/cuda/nn/batch_norm.cc +++ b/onnxruntime/core/providers/cuda/nn/batch_norm.cc @@ -34,7 +34,7 @@ Status BatchNorm::ComputeInternal(OpKernelContext* p_op_kernel_context) const const Tensor* mean = p_op_kernel_context->Input(3); const Tensor* var = p_op_kernel_context->Input(4); - ONNXRUNTIME_RETURN_IF_ERROR(BatchNormHelper::ValidateInputs(X, scale, B, mean, var)); + ORT_RETURN_IF_ERROR(BatchNormHelper::ValidateInputs(X, scale, B, mean, var)); const TensorShape& x_shape = X->Shape(); Tensor* Y = p_op_kernel_context->Output(0, x_shape); @@ -49,10 +49,10 @@ Status BatchNorm::ComputeInternal(OpKernelContext* p_op_kernel_context) const CudnnTensor data_desc; vector new_dims; BatchNormHelper::NormalizeDims(x_shape, new_dims); - ONNXRUNTIME_RETURN_IF_ERROR(data_desc.Set(new_dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(data_desc.Set(new_dims, CudnnTensor::GetDataType())); CudnnTensor bn_tensor_desc; - ONNXRUNTIME_RETURN_IF_ERROR(bn_tensor_desc.Set(data_desc, cudnn_batch_norm_mode_)); + ORT_RETURN_IF_ERROR(bn_tensor_desc.Set(data_desc, cudnn_batch_norm_mode_)); const auto alpha = Consts::One; const auto beta = Consts::Zero; diff --git a/onnxruntime/core/providers/cuda/nn/batch_norm.h b/onnxruntime/core/providers/cuda/nn/batch_norm.h index 162ef6d6fe693..8ab182ff73aa2 100644 --- a/onnxruntime/core/providers/cuda/nn/batch_norm.h +++ b/onnxruntime/core/providers/cuda/nn/batch_norm.h @@ -16,7 +16,7 @@ class BatchNorm final : public CudaKernel { : CudaKernel{op_kernel_info}, cudnn_batch_norm_mode_(CUDNN_BATCHNORM_SPATIAL) { float tmp_epsilon; - ONNXRUNTIME_ENFORCE(op_kernel_info.GetAttr("epsilon", &tmp_epsilon).IsOK()); + ORT_ENFORCE(op_kernel_info.GetAttr("epsilon", &tmp_epsilon).IsOK()); epsilon_ = ClampCudnnBatchNormEpsilon(tmp_epsilon); // spatial or not diff --git a/onnxruntime/core/providers/cuda/nn/conv.cc b/onnxruntime/core/providers/cuda/nn/conv.cc index 192acfcc2ba83..8135cca4e5d4e 100644 --- a/onnxruntime/core/providers/cuda/nn/conv.cc +++ b/onnxruntime/core/providers/cuda/nn/conv.cc @@ -57,7 +57,7 @@ Status Conv::ComputeInternal(OpKernelContext* context) const { const 
int64_t N = X->Shape()[0]; const int64_t M = W->Shape()[0]; - ONNXRUNTIME_RETURN_IF_ERROR(ValidateInputShape(X, W)); + ORT_RETURN_IF_ERROR(ValidateInputShape(X, W)); std::vector kernel_shape = ComputeKernelShape(W->Shape()); auto rank = kernel_shape.size(); @@ -76,7 +76,7 @@ Status Conv::ComputeInternal(OpKernelContext* context) const { std::vector y_dims; y_dims.insert(y_dims.begin(), {N, M}); - ONNXRUNTIME_RETURN_IF_ERROR(InferOutputShape(x_shape.Slice(2), kernel_shape, strides, dilations, &pads, &y_dims)); + ORT_RETURN_IF_ERROR(InferOutputShape(x_shape.Slice(2), kernel_shape, strides, dilations, &pads, &y_dims)); s_.y_dims = y_dims; std::vector x_dims_cudnn = x_dims; @@ -92,14 +92,14 @@ Status Conv::ComputeInternal(OpKernelContext* context) const { strides.push_back(1); dilations.push_back(1); } - ONNXRUNTIME_RETURN_IF_ERROR(s_.x_tensor.Set(x_dims_cudnn, CudnnTensor::GetDataType())); - ONNXRUNTIME_RETURN_IF_ERROR(s_.y_tensor.Set(y_dims_cudnn, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.x_tensor.Set(x_dims_cudnn, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.y_tensor.Set(y_dims_cudnn, CudnnTensor::GetDataType())); if (w_dims_changed) - ONNXRUNTIME_RETURN_IF_ERROR(s_.filter_desc.Set(w_dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.filter_desc.Set(w_dims, CudnnTensor::GetDataType())); cudnnConvolutionMode_t mode = CUDNN_CROSS_CORRELATION; - ONNXRUNTIME_RETURN_IF_ERROR(s_.conv_desc.Set(kernel_shape.size(), pads, strides, dilations, mode, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.conv_desc.Set(kernel_shape.size(), pads, strides, dilations, mode, CudnnTensor::GetDataType())); CUDNN_RETURN_IF_ERROR(cudnnSetConvolutionGroupCount(s_.conv_desc, gsl::narrow_cast(group_))); IAllocatorUniquePtr algo_search_workspace = GetScratchBuffer(AlgoSearchWorkspaceSize); @@ -107,14 +107,14 @@ Status Conv::ComputeInternal(OpKernelContext* context) const { if (has_bias) { const Tensor* B = context->Input(2); const auto& b_shape = B->Shape(); - ONNXRUNTIME_RETURN_IF_NOT(b_shape.NumDimensions() == 1, "bias should be 1D"); + ORT_RETURN_IF_NOT(b_shape.NumDimensions() == 1, "bias should be 1D"); std::vector b_dims(2 + kernel_shape.size()); b_dims[0] = 1; // N b_dims[1] = b_shape[0]; // C for (int i = 0; i < kernel_shape.size(); i++) b_dims[2 + i] = 1; - ONNXRUNTIME_RETURN_IF_ERROR(s_.b_tensor.Set(b_dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.b_tensor.Set(b_dims, CudnnTensor::GetDataType())); } Tensor* Y = context->Output(0, TensorShape(s_.y_dims)); diff --git a/onnxruntime/core/providers/cuda/nn/conv.h b/onnxruntime/core/providers/cuda/nn/conv.h index 08278978168b8..298d65a8bd834 100644 --- a/onnxruntime/core/providers/cuda/nn/conv.h +++ b/onnxruntime/core/providers/cuda/nn/conv.h @@ -59,10 +59,10 @@ class Conv : public CudaKernel, public ConvBase { public: Conv(const OpKernelInfo& info) : CudaKernel(info), ConvBase(info) { auto pads_size = pads_.size(); - ONNXRUNTIME_ENFORCE(pads_size % 2 == 0); + ORT_ENFORCE(pads_size % 2 == 0); auto rank = pads_size / 2; for (size_t i = 0; i < rank; i++) { - ONNXRUNTIME_ENFORCE(pads_[i] == pads_[i + rank], "cudnn only supports symmetric padding"); + ORT_ENFORCE(pads_[i] == pads_[i + rank], "cudnn only supports symmetric padding"); } } diff --git a/onnxruntime/core/providers/cuda/nn/conv_transpose.cc b/onnxruntime/core/providers/cuda/nn/conv_transpose.cc index f39a23338340c..c68a8aa04166e 100644 --- a/onnxruntime/core/providers/cuda/nn/conv_transpose.cc +++ b/onnxruntime/core/providers/cuda/nn/conv_transpose.cc @@ 
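The symmetric-padding check in the CUDA Conv constructor above relies on the ONNX pads layout, [begin_0, ..., begin_{r-1}, end_0, ..., end_{r-1}], and requires begin_i == end_i for every spatial axis because cuDNN accepts only one padding value per axis. A minimal sketch of the same check (hypothetical helper):

#include <cstdint>
#include <stdexcept>
#include <vector>

void CheckSymmetricPads(const std::vector<int64_t>& pads) {
  if (pads.size() % 2 != 0)
    throw std::invalid_argument("pads must have an even number of entries");
  const size_t rank = pads.size() / 2;
  for (size_t i = 0; i < rank; ++i) {
    if (pads[i] != pads[i + rank])
      throw std::invalid_argument("cudnn only supports symmetric padding");
  }
}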
-52,33 +52,33 @@ Status ConvTranspose::ComputeInternal(OpKernelContext* context) const { s_.last_w_dims = w_dims; Prepare p; - ONNXRUNTIME_RETURN_IF_ERROR(PrepareForCompute(context, has_bias, p)); + ORT_RETURN_IF_ERROR(PrepareForCompute(context, has_bias, p)); const auto& y_dims = p.Y->Shape().GetDims(); s_.y_dims = y_dims; - ONNXRUNTIME_RETURN_IF_ERROR(s_.x_tensor.Set(x_dims, CudnnTensor::GetDataType())); - ONNXRUNTIME_RETURN_IF_ERROR(s_.y_tensor.Set(y_dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.x_tensor.Set(x_dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.y_tensor.Set(y_dims, CudnnTensor::GetDataType())); if (w_dims_changed) - ONNXRUNTIME_RETURN_IF_ERROR(s_.filter_desc.Set(w_dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.filter_desc.Set(w_dims, CudnnTensor::GetDataType())); cudnnConvolutionMode_t mode = CUDNN_CROSS_CORRELATION; - ONNXRUNTIME_RETURN_IF_ERROR(s_.conv_desc.Set(p.kernel_shape.size(), p.pads, p.strides, p.dilations, mode, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.conv_desc.Set(p.kernel_shape.size(), p.pads, p.strides, p.dilations, mode, CudnnTensor::GetDataType())); CUDNN_RETURN_IF_ERROR(cudnnSetConvolutionGroupCount(s_.conv_desc, gsl::narrow_cast(group_))); IAllocatorUniquePtr algo_search_workspace = GetScratchBuffer(AlgoSearchWorkspaceSize); if (has_bias) { const auto& b_shape = p.B->Shape(); - ONNXRUNTIME_RETURN_IF_NOT(b_shape.NumDimensions() == 1, "bias should be 1D"); + ORT_RETURN_IF_NOT(b_shape.NumDimensions() == 1, "bias should be 1D"); std::vector b_dims(2 + p.kernel_shape.size()); b_dims[0] = 1; // N b_dims[1] = b_shape[0]; // C for (size_t i = 0; i < p.kernel_shape.size(); i++) b_dims[2 + i] = 1; - ONNXRUNTIME_RETURN_IF_ERROR(s_.b_tensor.Set(b_dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(s_.b_tensor.Set(b_dims, CudnnTensor::GetDataType())); } y_data = reinterpret_cast(p.Y->template MutableData()); diff --git a/onnxruntime/core/providers/cuda/nn/instance_norm.cc b/onnxruntime/core/providers/cuda/nn/instance_norm.cc index 65c868cc91eb2..1bd1d236f7169 100644 --- a/onnxruntime/core/providers/cuda/nn/instance_norm.cc +++ b/onnxruntime/core/providers/cuda/nn/instance_norm.cc @@ -28,7 +28,7 @@ template InstanceNorm::InstanceNorm(const OpKernelInfo& op_kernel_info) : CudaKernel(op_kernel_info) { float tmp_epsilon; - ONNXRUNTIME_ENFORCE(op_kernel_info.GetAttr("epsilon", &tmp_epsilon).IsOK()); + ORT_ENFORCE(op_kernel_info.GetAttr("epsilon", &tmp_epsilon).IsOK()); epsilon_ = ClampCudnnBatchNormEpsilon(tmp_epsilon); } @@ -40,7 +40,7 @@ Status InstanceNorm::ComputeInternal(OpKernelContext* p_op_kernel_context) co const Tensor* scale = p_op_kernel_context->Input(1); const Tensor* bias = p_op_kernel_context->Input(2); - ONNXRUNTIME_RETURN_IF_ERROR(InstanceNormHelper::ValidateInputs(X, scale, bias)); + ORT_RETURN_IF_ERROR(InstanceNormHelper::ValidateInputs(X, scale, bias)); const TensorShape& x_shape = X->Shape(); Tensor* Y = p_op_kernel_context->Output(0, x_shape); @@ -63,10 +63,10 @@ Status InstanceNorm::ComputeInternal(OpKernelContext* p_op_kernel_context) co CudnnTensor data_desc; std::vector new_dims; BatchNormHelper::NormalizeDims(x_shape, new_dims); - ONNXRUNTIME_RETURN_IF_ERROR(data_desc.Set(new_dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(data_desc.Set(new_dims, CudnnTensor::GetDataType())); CudnnTensor stats_desc; - ONNXRUNTIME_RETURN_IF_ERROR(stats_desc.Set(data_desc, CUDNN_BATCHNORM_SPATIAL)); + ORT_RETURN_IF_ERROR(stats_desc.Set(data_desc, CUDNN_BATCHNORM_SPATIAL)); 
CUDNN_RETURN_IF_ERROR(cudnnBatchNormalizationForwardTraining( CudnnHandle(), @@ -95,10 +95,10 @@ Status InstanceNorm::ComputeInternal(OpKernelContext* p_op_kernel_context) co auto image_size = input_count / stats_count; CudnnTensor data_desc; - ONNXRUNTIME_RETURN_IF_ERROR(data_desc.Set({1, stats_count, image_size, 1}, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(data_desc.Set({1, stats_count, image_size, 1}, CudnnTensor::GetDataType())); CudnnTensor stats_desc; - ONNXRUNTIME_RETURN_IF_ERROR(stats_desc.Set({1, stats_count, 1, 1}, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(stats_desc.Set({1, stats_count, 1, 1}, CudnnTensor::GetDataType())); auto mean = GetScratchBuffer(stats_count); auto variance = GetScratchBuffer(stats_count); diff --git a/onnxruntime/core/providers/cuda/nn/lrn.cc b/onnxruntime/core/providers/cuda/nn/lrn.cc index c25464cbf7a27..4936feb857f4a 100644 --- a/onnxruntime/core/providers/cuda/nn/lrn.cc +++ b/onnxruntime/core/providers/cuda/nn/lrn.cc @@ -23,19 +23,19 @@ REGISTER_KERNEL_TYPED(MLFloat16) template LRN::LRN(const OpKernelInfo& info) : CudaKernel(info) { int64_t size; - ONNXRUNTIME_ENFORCE(info.GetAttr("size", &size).IsOK()); - ONNXRUNTIME_ENFORCE(size > 0); - ONNXRUNTIME_ENFORCE(size % 2 == 1); + ORT_ENFORCE(info.GetAttr("size", &size).IsOK()); + ORT_ENFORCE(size > 0); + ORT_ENFORCE(size % 2 == 1); float alpha; float beta; - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha).IsOK()); - ONNXRUNTIME_ENFORCE(alpha > 0.0f); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta).IsOK()); - ONNXRUNTIME_ENFORCE(beta > 0.0f); + ORT_ENFORCE(info.GetAttr("alpha", &alpha).IsOK()); + ORT_ENFORCE(alpha > 0.0f); + ORT_ENFORCE(info.GetAttr("beta", &beta).IsOK()); + ORT_ENFORCE(beta > 0.0f); float bias = info.GetAttrOrDefault("bias", 1.0f); - ONNXRUNTIME_ENFORCE(norm_desc_.Set( + ORT_ENFORCE(norm_desc_.Set( gsl::narrow_cast(size), static_cast(alpha), static_cast(beta), @@ -51,12 +51,12 @@ Status LRN::ComputeInternal(OpKernelContext* context) const { auto rank = X->Shape().NumDimensions(); if (rank != 4 && rank != 5) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "cudnn LRN only supports 4D or 5D input"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "cudnn LRN only supports 4D or 5D input"); Tensor* Y = context->Output(0, X->Shape()); CudnnTensor x_tensor; - ONNXRUNTIME_RETURN_IF_ERROR(x_tensor.Set(X->Shape().GetDims(), CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(x_tensor.Set(X->Shape().GetDims(), CudnnTensor::GetDataType())); const auto one = Consts::One; const auto zero = Consts::Zero; diff --git a/onnxruntime/core/providers/cuda/nn/pool.cc b/onnxruntime/core/providers/cuda/nn/pool.cc index 89d5dcb4b584c..879417422f728 100644 --- a/onnxruntime/core/providers/cuda/nn/pool.cc +++ b/onnxruntime/core/providers/cuda/nn/pool.cc @@ -104,7 +104,7 @@ Status Pool::ComputeInternal(OpKernelContext* context) const { const auto& x_dims = x_shape.GetDims(); if (x_shape.NumDimensions() < 3) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input dimension cannot be less than 3."); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input dimension cannot be less than 3."); } std::vector kernel_shape = kernel_shape_; @@ -139,15 +139,15 @@ Status Pool::ComputeInternal(OpKernelContext* context) const { const auto beta = Consts::Zero; CudnnTensor x_tensor; CudnnTensor y_tensor; - ONNXRUNTIME_RETURN_IF_ERROR(x_tensor.Set(x_dims_cudnn, CudnnTensor::GetDataType())); - ONNXRUNTIME_RETURN_IF_ERROR(y_tensor.Set(y_dims_cudnn, CudnnTensor::GetDataType())); + 
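The LRN constructor above validates its attributes before building the cuDNN descriptor: the window size must be a positive odd number and alpha/beta must be strictly positive. A minimal standalone sketch of those checks (hypothetical helper; the kernel enforces them with ORT_ENFORCE):

#include <cstdint>
#include <stdexcept>

void ValidateLrnAttrs(int64_t size, float alpha, float beta) {
  if (size <= 0 || size % 2 != 1)
    throw std::invalid_argument("size must be a positive odd number");
  if (alpha <= 0.0f) throw std::invalid_argument("alpha must be > 0");
  if (beta <= 0.0f) throw std::invalid_argument("beta must be > 0");
}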
ORT_RETURN_IF_ERROR(x_tensor.Set(x_dims_cudnn, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(y_tensor.Set(y_dims_cudnn, CudnnTensor::GetDataType())); cudnnPoolingMode_t mode = CUDNN_POOLING_MAX; if (PoolType::type == onnxruntime::PoolType::kAveragePool) { mode = count_include_pad_ ? CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING : CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; } CudnnPoolingDescriptor pooling_desc; - ONNXRUNTIME_RETURN_IF_ERROR(pooling_desc.Set(mode, kernel_shape, pads, strides)); + ORT_RETURN_IF_ERROR(pooling_desc.Set(mode, kernel_shape, pads, strides)); CUDNN_RETURN_IF_ERROR(cudnnPoolingForward(CudnnHandle(), pooling_desc, &alpha, x_tensor, x_data, &beta, y_tensor, y_data)); @@ -162,7 +162,7 @@ Status Pool>::ComputeInternal(OpKernelContext* context) const { const auto& x_dims = x_shape.GetDims(); if (x_shape.NumDimensions() < 3) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input dimension cannot be less than 3."); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input dimension cannot be less than 3."); } std::vector kernel_shape = this->kernel_shape_; diff --git a/onnxruntime/core/providers/cuda/reduction/reduction_ops.cc b/onnxruntime/core/providers/cuda/reduction/reduction_ops.cc index 9d3cba282beaf..83bc9560639ff 100644 --- a/onnxruntime/core/providers/cuda/reduction/reduction_ops.cc +++ b/onnxruntime/core/providers/cuda/reduction/reduction_ops.cc @@ -60,12 +60,12 @@ template Status ReduceKernel::ComputeImpl(OpKernelContext* ctx, cudnnReduceTensorOp_t cudnnReduceOp) const { typedef typename ToCudaType::MappedType CudaT; const Tensor* X = ctx->Input(0); - ONNXRUNTIME_ENFORCE(nullptr != X); + ORT_ENFORCE(nullptr != X); const TensorShape input_shape{X->Shape()}; const auto rank = input_shape.NumDimensions(); if (rank > 8) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "cuDNN only supports up to 8-D tensors in reduction"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "cuDNN only supports up to 8-D tensors in reduction"); } const auto& input_dims = input_shape.GetDims(); @@ -114,13 +114,13 @@ Status ReduceKernel::ComputeImpl(OpKernelContext* ctx, cudnnRe } CudnnReduceDescriptor reduce_desc; - ONNXRUNTIME_RETURN_IF_ERROR(reduce_desc.Set(cudnnReduceOp, cudnn_type_X, ReduceTensorIndices)); + ORT_RETURN_IF_ERROR(reduce_desc.Set(cudnnReduceOp, cudnn_type_X, ReduceTensorIndices)); const auto one = Consts::One; const auto zero = Consts::Zero; CudnnTensor input_tensor; CudnnTensor output_tensor; - ONNXRUNTIME_RETURN_IF_ERROR(input_tensor.Set(input_dims_cudnn, cudnn_type_X)); - ONNXRUNTIME_RETURN_IF_ERROR(output_tensor.Set(output_dims_cudnn, cudnn_type_X)); + ORT_RETURN_IF_ERROR(input_tensor.Set(input_dims_cudnn, cudnn_type_X)); + ORT_RETURN_IF_ERROR(output_tensor.Set(output_dims_cudnn, cudnn_type_X)); size_t workspace_bytes = 0; CUDNN_RETURN_IF_ERROR(cudnnGetReductionWorkspaceSize(CudnnHandle(), reduce_desc, input_tensor, output_tensor, &workspace_bytes)); auto workspace_cuda = GetScratchBuffer(workspace_bytes); @@ -141,7 +141,7 @@ Status ReduceKernel::ComputeImpl(OpKernelContext* ctx, cudnnRe } else if (log_sum_exp_) { // Reduce max CudnnReduceDescriptor reduce_max_desc; - ONNXRUNTIME_RETURN_IF_ERROR(reduce_max_desc.Set(CUDNN_REDUCE_TENSOR_MAX, cudnn_type_X, CUDNN_REDUCE_TENSOR_NO_INDICES)); + ORT_RETURN_IF_ERROR(reduce_max_desc.Set(CUDNN_REDUCE_TENSOR_MAX, cudnn_type_X, CUDNN_REDUCE_TENSOR_NO_INDICES)); CUDNN_RETURN_IF_ERROR(cudnnReduceTensor( CudnnHandle(), reduce_max_desc, nullptr, 0, workspace_cuda.get(), workspace_bytes, &one, input_tensor, 
reinterpret_cast(X->template Data()), diff --git a/onnxruntime/core/providers/cuda/rnn/cudnn_rnn_base.cc b/onnxruntime/core/providers/cuda/rnn/cudnn_rnn_base.cc index 907e831ce984f..624d45b7a21ee 100644 --- a/onnxruntime/core/providers/cuda/rnn/cudnn_rnn_base.cc +++ b/onnxruntime/core/providers/cuda/rnn/cudnn_rnn_base.cc @@ -86,7 +86,7 @@ Status CudnnRnnBase::SetCudnnRnnDesc() { CudnnDropout cudnn_dropout_desc; cudnn_dropout_desc.Set(CudnnHandle()); - ONNXRUNTIME_RETURN_IF_ERROR(rnn_desc_.Set(CudnnHandle(), hidden_size_, num_layers_, cudnn_dropout_desc, + ORT_RETURN_IF_ERROR(rnn_desc_.Set(CudnnHandle(), hidden_size_, num_layers_, cudnn_dropout_desc, cudnn_direction, rnn_mode_, CudnnTensor::GetDataType())); return Status::OK(); @@ -110,7 +110,7 @@ Status CudnnRnnBase::ReorganizeWeights(const Tensor* W, const Tensor* R, cons size_t number = W_lin_layer_id_.size(); int64_t w_size = num_directions_ * (number * hidden_size_ * (input_size + hidden_size_ + 2)); std::vector dims_w({w_size, 1, 1}); - ONNXRUNTIME_RETURN_IF_ERROR(target_w_desc.Set(dims_w, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(target_w_desc.Set(dims_w, CudnnTensor::GetDataType())); std::vector fake_dims_x({1, input_size, 1}); CudnnTensor fake_x_desc; @@ -123,7 +123,7 @@ Status CudnnRnnBase::ReorganizeWeights(const Tensor* W, const Tensor* R, cons const T* R_data = R->template Data(); const T* B_data = B == nullptr ? nullptr : B->template Data(); - ONNXRUNTIME_RETURN_IF_ERROR(SetCudnnRnnWeightBias(CudnnHandle(), rnn_desc_, fake_x_desc, target_w_desc, + ORT_RETURN_IF_ERROR(SetCudnnRnnWeightBias(CudnnHandle(), rnn_desc_, fake_x_desc, target_w_desc, target_w_data.get(), W_data, R_data, B_data)); return Status::OK(); @@ -140,7 +140,7 @@ Status CudnnRnnBase::CacheCudnnRnnWeights(const OpKernelInfo& info) { if (get_W && get_R) { info.TryGetConstantInput(Input_Index::B, &B); - ONNXRUNTIME_RETURN_IF_ERROR(ReorganizeWeights(W, R, B, w_data_cache_, w_desc_cache_)); + ORT_RETURN_IF_ERROR(ReorganizeWeights(W, R, B, w_data_cache_, w_desc_cache_)); weight_cached_ = true; } @@ -153,7 +153,7 @@ Status CudnnRnnBase::ComputeInternal(OpKernelContext* ctx) const { // inputs const Tensor* X = ctx->Input(Input_Index::X); // inputs. 
[seq_length, batch_size, input_size] - ONNXRUNTIME_ENFORCE(nullptr != X); + ORT_ENFORCE(nullptr != X); // optional inputs const Tensor* sequence_lens = ctx->Input(Input_Index::sequence_lens); // [batch_size] @@ -189,10 +189,10 @@ Status CudnnRnnBase::ComputeInternal(OpKernelContext* ctx) const { CudnnTensor cx_desc; CudnnTensor y_h_desc; CudnnTensor y_c_desc; - ONNXRUNTIME_RETURN_IF_ERROR(hx_desc.Set(dims_hxy, CudnnTensor::GetDataType())); - ONNXRUNTIME_RETURN_IF_ERROR(cx_desc.Set(dims_hxy, CudnnTensor::GetDataType())); - ONNXRUNTIME_RETURN_IF_ERROR(y_h_desc.Set(dims_hxy, CudnnTensor::GetDataType())); - ONNXRUNTIME_RETURN_IF_ERROR(y_c_desc.Set(dims_hxy, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(hx_desc.Set(dims_hxy, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(cx_desc.Set(dims_hxy, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(y_h_desc.Set(dims_hxy, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(y_c_desc.Set(dims_hxy, CudnnTensor::GetDataType())); // Prepare the weight data IAllocatorUniquePtr w_data; diff --git a/onnxruntime/core/providers/cuda/rnn/cudnn_rnn_base.h b/onnxruntime/core/providers/cuda/rnn/cudnn_rnn_base.h index 18c82863a52ba..2aaced147ec3f 100644 --- a/onnxruntime/core/providers/cuda/rnn/cudnn_rnn_base.h +++ b/onnxruntime/core/providers/cuda/rnn/cudnn_rnn_base.h @@ -95,11 +95,11 @@ class CudnnRnnBase : public CudaKernel { public: CudnnRnnBase(const OpKernelInfo& info) : CudaKernel{info} { reverse_ = false; - ONNXRUNTIME_ENFORCE(info.GetAttr("direction", &direction_).IsOK()); + ORT_ENFORCE(info.GetAttr("direction", &direction_).IsOK()); num_directions_ = direction_ == "bidirectional" ? 2 : 1; - ONNXRUNTIME_ENFORCE(allowed_directions.find(direction_) != allowed_directions.end()); + ORT_ENFORCE(allowed_directions.find(direction_) != allowed_directions.end()); - ONNXRUNTIME_ENFORCE(info.GetAttr("hidden_size", &hidden_size_).IsOK() && hidden_size_ > 0); + ORT_ENFORCE(info.GetAttr("hidden_size", &hidden_size_).IsOK() && hidden_size_ > 0); rnn_mode_ = CUDNN_LSTM; num_layers_ = 1; weight_cached_ = false; diff --git a/onnxruntime/core/providers/cuda/rnn/rnn.h b/onnxruntime/core/providers/cuda/rnn/rnn.h index 30cadec069f6f..246e8d1062df0 100644 --- a/onnxruntime/core/providers/cuda/rnn/rnn.h +++ b/onnxruntime/core/providers/cuda/rnn/rnn.h @@ -18,7 +18,7 @@ class RNN final : public CudnnRnnBase { public: RNN(const OpKernelInfo& info) : CudnnRnnBase(info) { std::vector activations_; - ONNXRUNTIME_ENFORCE(info.GetAttrs("activations", activations_).IsOK()); + ORT_ENFORCE(info.GetAttrs("activations", activations_).IsOK()); if (activations_[0] == "Relu") CudnnRnnBase::rnn_mode_ = CUDNN_RNN_RELU; else if (activations_[0] == "Tanh") diff --git a/onnxruntime/core/providers/cuda/shared_inc/fpgeneric.h b/onnxruntime/core/providers/cuda/shared_inc/fpgeneric.h index bad41a1bb2ef2..afbea16f95a1f 100644 --- a/onnxruntime/core/providers/cuda/shared_inc/fpgeneric.h +++ b/onnxruntime/core/providers/cuda/shared_inc/fpgeneric.h @@ -209,10 +209,10 @@ inline cublasStatus_t cublasScalHelper(cublasHandle_t handle, int n, const half* return cublasScalEx(handle, n, (void*)&tmp_alpha, CUDA_R_32F, (void*)x, CUDA_R_16F, incx, CUDA_R_32F); } inline cublasStatus_t cublasScalHelper(cublasHandle_t, int, const char*, char*, int) { - ONNXRUNTIME_NOT_IMPLEMENTED("Unsupported template argument(char) in cublas_scal"); + ORT_NOT_IMPLEMENTED("Unsupported template argument(char) in cublas_scal"); } inline cublasStatus_t cublasScalHelper(cublasHandle_t, int, const short*, short*, int) { - 
ONNXRUNTIME_NOT_IMPLEMENTED("Unsupported template argument(short) in cublas_scal"); + ORT_NOT_IMPLEMENTED("Unsupported template argument(short) in cublas_scal"); } // dot @@ -245,11 +245,11 @@ inline curandStatus_t curandGenerateUniformHelper(curandGenerator_t generator, d curandStatus_t curandGenerateUniformHelper(curandGenerator_t, half* outputPtr, size_t num); inline curandStatus_t curandGenerateUniformHelper(curandGenerator_t, char*, size_t) { - ONNXRUNTIME_NOT_IMPLEMENTED("Unsupported template argument(char) in GPUSparseMatrix"); + ORT_NOT_IMPLEMENTED("Unsupported template argument(char) in GPUSparseMatrix"); } inline curandStatus_t curandGenerateUniformHelper(curandGenerator_t, short*, size_t) { - ONNXRUNTIME_NOT_IMPLEMENTED("Unsupported template argument(short) in GPUSparseMatrix"); + ORT_NOT_IMPLEMENTED("Unsupported template argument(short) in GPUSparseMatrix"); } inline curandStatus_t curandGenerateNormalHelper(curandGenerator_t generator, float* outputPtr, size_t n, float mean, float stddev) { @@ -261,9 +261,9 @@ inline curandStatus_t curandGenerateNormalHelper(curandGenerator_t generator, do curandStatus_t curandGenerateNormalHelper(curandGenerator_t, half* outputPtr, size_t n, half mean, half stddev); inline curandStatus_t curandGenerateNormalHelper(curandGenerator_t, char*, size_t, char, char) { - ONNXRUNTIME_NOT_IMPLEMENTED("Unsupported template argument(char) in GPUSparseMatrix"); + ORT_NOT_IMPLEMENTED("Unsupported template argument(char) in GPUSparseMatrix"); } inline curandStatus_t curandGenerateNormalHelper(curandGenerator_t, short*, size_t, short, short) { - ONNXRUNTIME_NOT_IMPLEMENTED("Unsupported template argument(short) in GPUSparseMatrix"); + ORT_NOT_IMPLEMENTED("Unsupported template argument(short) in GPUSparseMatrix"); } diff --git a/onnxruntime/core/providers/cuda/symbols.txt b/onnxruntime/core/providers/cuda/symbols.txt index ed660e42fe162..30d625edc0790 100644 --- a/onnxruntime/core/providers/cuda/symbols.txt +++ b/onnxruntime/core/providers/cuda/symbols.txt @@ -1 +1 @@ -ONNXRuntimeCreateCUDAExecutionProviderFactory +OrtCreateCUDAExecutionProviderFactory diff --git a/onnxruntime/core/providers/cuda/tensor/cast_op.cc b/onnxruntime/core/providers/cuda/tensor/cast_op.cc index a0db09da86bfa..ef46535e1386e 100644 --- a/onnxruntime/core/providers/cuda/tensor/cast_op.cc +++ b/onnxruntime/core/providers/cuda/tensor/cast_op.cc @@ -66,11 +66,11 @@ Status Cast::ComputeInternal(OpKernelContext* context) const { CASE(TensorProto_DataType_UINT64, uint64_t) CASE(TensorProto_DataType_BOOL, bool) case TensorProto_DataType_STRING: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Casting to and from strings is not supported yet."); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Casting to and from strings is not supported yet."); case TensorProto_DataType_UNDEFINED: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Cast op must have 'to' argument of type DataType"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Cast op must have 'to' argument of type DataType"); default: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Unexpected 'to' argument value: ", to_); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Unexpected 'to' argument value: ", to_); } return Status::OK(); } diff --git a/onnxruntime/core/providers/cuda/tensor/cast_op.h b/onnxruntime/core/providers/cuda/tensor/cast_op.h index c88f2afee0c89..f75b2549e1199 100644 --- a/onnxruntime/core/providers/cuda/tensor/cast_op.h +++ b/onnxruntime/core/providers/cuda/tensor/cast_op.h @@ -14,7 +14,7 @@ class Cast final : public 
CudaKernel { Cast(const OpKernelInfo& info) : CudaKernel(info) { int64_t to; Status status = info.GetAttr("to", &to); - ONNXRUNTIME_ENFORCE(status.IsOK(), "Attribute to is not set."); + ORT_ENFORCE(status.IsOK(), "Attribute to is not set."); to_ = gsl::narrow_cast(to); } diff --git a/onnxruntime/core/providers/cuda/tensor/compress.cc b/onnxruntime/core/providers/cuda/tensor/compress.cc index 354a85483ad79..4e33a421846b9 100644 --- a/onnxruntime/core/providers/cuda/tensor/compress.cc +++ b/onnxruntime/core/providers/cuda/tensor/compress.cc @@ -18,15 +18,15 @@ ONNX_OPERATOR_KERNEL_EX( Status Compress::ComputeInternal(OpKernelContext* ctx) const { const Tensor* input_tensor = ctx->Input(0); - ONNXRUNTIME_ENFORCE(input_tensor); + ORT_ENFORCE(input_tensor); size_t rank = input_tensor->Shape().NumDimensions(); auto& input_dimensions = input_tensor->Shape().GetDims(); if (has_axis_) { - ONNXRUNTIME_ENFORCE(axis_ < static_cast(rank), "axis greater than input data dimension!"); + ORT_ENFORCE(axis_ < static_cast(rank), "axis greater than input data dimension!"); } const Tensor* condition = ctx->Input(1); - ONNXRUNTIME_ENFORCE(condition); + ORT_ENFORCE(condition); auto condition_length = condition->Shape().Size(); auto condition_data = condition->template Data(); @@ -64,7 +64,7 @@ Status Compress::ComputeInternal(OpKernelContext* ctx) const { } } - ONNXRUNTIME_RETURN_IF_ERROR(CompressImpl(element_bytes, + ORT_RETURN_IF_ERROR(CompressImpl(element_bytes, gsl::narrow_cast(valid_condition_length), gsl::narrow_cast(axis_right_stride), has_axis_ ? gsl::narrow_cast(input_dimensions[axis_]) : gsl::narrow_cast(input_size), diff --git a/onnxruntime/core/providers/cuda/tensor/compress_impl.cu b/onnxruntime/core/providers/cuda/tensor/compress_impl.cu index 6e889a9d81655..019980b68c7bd 100644 --- a/onnxruntime/core/providers/cuda/tensor/compress_impl.cu +++ b/onnxruntime/core/providers/cuda/tensor/compress_impl.cu @@ -107,7 +107,7 @@ Status CompressImpl(const size_t element_bytes, (CUDA_LONG)N); break; default: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Compress operator"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Compress operator"); } return Status::OK(); diff --git a/onnxruntime/core/providers/cuda/tensor/concat.cc b/onnxruntime/core/providers/cuda/tensor/concat.cc index a6a7100ec967c..27f7081de34de 100644 --- a/onnxruntime/core/providers/cuda/tensor/concat.cc +++ b/onnxruntime/core/providers/cuda/tensor/concat.cc @@ -18,7 +18,7 @@ Status Concat::ComputeInternal(OpKernelContext* ctx) const { auto input_count = Node().InputArgCount().front(); Prepare p; - ONNXRUNTIME_RETURN_IF_ERROR(PrepareForCompute(ctx, input_count, p)); + ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, input_count, p)); int64_t output_offset = 0; auto element_bytes = p.output_tensor->DataType()->Size(); diff --git a/onnxruntime/core/providers/cuda/tensor/crop.cc b/onnxruntime/core/providers/cuda/tensor/crop.cc index fa1d2bf2b9556..3170ac9a7e0e2 100644 --- a/onnxruntime/core/providers/cuda/tensor/crop.cc +++ b/onnxruntime/core/providers/cuda/tensor/crop.cc @@ -25,7 +25,7 @@ REGISTER_KERNEL_TYPED(MLFloat16) template Status Crop::ComputeInternal(OpKernelContext* context) const { const Tensor* X = context->Input(0); - ONNXRUNTIME_RETURN_IF_ERROR(ValidateInput(X)); + ORT_RETURN_IF_ERROR(ValidateInput(X)); const auto dims = X->Shape().GetDims(); const int64_t N = dims[0]; diff --git a/onnxruntime/core/providers/cuda/tensor/flatten.cc b/onnxruntime/core/providers/cuda/tensor/flatten.cc index 
f5b959bbdb7de..3f37e5d1bde95 100644 --- a/onnxruntime/core/providers/cuda/tensor/flatten.cc +++ b/onnxruntime/core/providers/cuda/tensor/flatten.cc @@ -19,7 +19,7 @@ ONNX_OPERATOR_KERNEL_EX( Status Flatten::ComputeInternal(OpKernelContext* ctx) const { const Tensor* X = ctx->Input(0); const TensorShape& X_shape = X->Shape(); - ONNXRUNTIME_ENFORCE(gsl::narrow_cast(X_shape.NumDimensions()) >= axis_, "The rank of input tensor must be >= axis"); + ORT_ENFORCE(gsl::narrow_cast(X_shape.NumDimensions()) >= axis_, "The rank of input tensor must be >= axis"); Tensor* Y = ctx->Output(0, TensorShape({X_shape.SizeToDimension(axis_), X_shape.SizeFromDimension(axis_)})); //If source and target pointers are not equal (non-inplace operation), we need to copy the data. diff --git a/onnxruntime/core/providers/cuda/tensor/flatten.h b/onnxruntime/core/providers/cuda/tensor/flatten.h index 9e3066ddd1a89..bfecb2906bada 100644 --- a/onnxruntime/core/providers/cuda/tensor/flatten.h +++ b/onnxruntime/core/providers/cuda/tensor/flatten.h @@ -11,7 +11,7 @@ namespace cuda { class Flatten final : public CudaKernel { public: Flatten(const OpKernelInfo& info) : CudaKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("axis", &axis_).IsOK()); + ORT_ENFORCE(info.GetAttr("axis", &axis_).IsOK()); } Status ComputeInternal(OpKernelContext* context) const override; diff --git a/onnxruntime/core/providers/cuda/tensor/gather.cc b/onnxruntime/core/providers/cuda/tensor/gather.cc index 4f6ae9348ea2d..cef3fe397769c 100644 --- a/onnxruntime/core/providers/cuda/tensor/gather.cc +++ b/onnxruntime/core/providers/cuda/tensor/gather.cc @@ -50,7 +50,7 @@ ONNX_OPERATOR_KERNEL_EX( Status Gather::ComputeInternal(OpKernelContext* context) const { Prepare p; - ONNXRUNTIME_RETURN_IF_ERROR(PrepareForCompute(context, p)); + ORT_RETURN_IF_ERROR(PrepareForCompute(context, p)); const TensorShape& input_shape = p.input_tensor->Shape(); @@ -66,7 +66,7 @@ Status Gather::ComputeInternal(OpKernelContext* context) const { gsl::span div_strides_span = div_strides.CpuSpan(); div_strides_span[0] = fast_divmod(gsl::narrow_cast(output_block_size)); div_strides_span[1] = fast_divmod(gsl::narrow_cast(block_size)); - ONNXRUNTIME_RETURN_IF_ERROR(div_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(div_strides.CopyToGpu()); MLDataType T_type = p.input_tensor->DataType(); MLDataType Tin_type = p.indices_tensor->DataType(); @@ -84,7 +84,7 @@ Status Gather::ComputeInternal(OpKernelContext* context) const { TYPED_FUNCTION_CALL(double) TYPED_FUNCTION_CALL(bool) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Type for Tind not supported yet in Gather."); + return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Type for Tind not supported yet in Gather."); } } // namespace cuda diff --git a/onnxruntime/core/providers/cuda/tensor/image_scaler.cc b/onnxruntime/core/providers/cuda/tensor/image_scaler.cc index be672b134cce9..2598dd594624a 100644 --- a/onnxruntime/core/providers/cuda/tensor/image_scaler.cc +++ b/onnxruntime/core/providers/cuda/tensor/image_scaler.cc @@ -24,8 +24,8 @@ REGISTER_KERNEL_TYPED(MLFloat16) template ImageScaler::ImageScaler(const OpKernelInfo& info) : CudaKernel(info) { - ONNXRUNTIME_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttrs("bias", bias_).IsOK()); + ORT_ENFORCE(info.GetAttr("scale", &scale_).IsOK()); + ORT_ENFORCE(info.GetAttrs("bias", bias_).IsOK()); b_data_ = GetScratchBuffer(bias_.size()); CUDA_CALL_THROW(cudaMemcpy(b_data_.get(), bias_.data(), sizeof(float) * bias_.size(), cudaMemcpyHostToDevice)); 
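The hunks above and below follow the pattern that runs through the whole change: ORT_ENFORCE replaces ONNXRUNTIME_ENFORCE for hard preconditions (it throws when the condition fails), while ORT_RETURN_IF_ERROR and ORT_MAKE_STATUS replace their ONNXRUNTIME_* counterparts for propagating a Status out of a kernel. A minimal sketch of the idiom; the kernel name, the "gamma" attribute, and the Validate() helper are illustrative only and are not part of this patch:

    // Illustrative sketch only -- ExampleKernel, "gamma" and Validate() do not exist in this change.
    class ExampleKernel final : public CudaKernel {
     public:
      explicit ExampleKernel(const OpKernelInfo& info) : CudaKernel(info) {
        // Construction-time invariants use ORT_ENFORCE, which throws on failure.
        ORT_ENFORCE(info.GetAttr<float>("gamma", &gamma_).IsOK(), "Attribute gamma is not set.");
      }

      Status ComputeInternal(OpKernelContext* ctx) const override {
        const Tensor* X = ctx->Input<Tensor>(0);
        if (X->Shape().NumDimensions() != 4)
          return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "expected a 4-D input");
        // Runtime failures are propagated as a Status rather than thrown.
        ORT_RETURN_IF_ERROR(Validate(X));
        return Status::OK();
      }

     private:
      Status Validate(const Tensor* X) const;  // hypothetical helper
      float gamma_;
    };
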
@@ -37,14 +37,14 @@ Status ImageScaler::ComputeInternal(OpKernelContext* context) const { const auto dims = X->Shape().GetDims(); if (dims.size() != 4) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input is expected to have four dimensions corresponding to [N,C,H,W], got ", dims.size()); } const int64_t C = dims[1]; // dims are NCHW if (!bias_.empty() && bias_.size() != static_cast(C)) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Bias size (", bias_.size(), ") does not match the number of channels (", C, ")"); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Bias size (", bias_.size(), ") does not match the number of channels (", C, ")"); } Tensor* Y = context->Output(0, X->Shape()); diff --git a/onnxruntime/core/providers/cuda/tensor/pad.cc b/onnxruntime/core/providers/cuda/tensor/pad.cc index 809980235fbe4..f57779706cd15 100644 --- a/onnxruntime/core/providers/cuda/tensor/pad.cc +++ b/onnxruntime/core/providers/cuda/tensor/pad.cc @@ -34,7 +34,7 @@ Status Pad::ComputeInternal(OpKernelContext* ctx) const { TensorPitches::Calculate(input_strides.CpuSpan(), input_shape.GetDims()); std::vector output_dims(input_shape.GetDims()); - ONNXRUNTIME_ENFORCE(dimension_count * 2 == pads_.size(), "'pads' attribute has wrong number of values"); + ORT_ENFORCE(dimension_count * 2 == pads_.size(), "'pads' attribute has wrong number of values"); // Calculate output dimensions, and handle any negative padding auto lower_pads_span = lower_pads.CpuSpan(); @@ -46,12 +46,12 @@ Status Pad::ComputeInternal(OpKernelContext* ctx) const { } TensorShape output_shape(output_dims); auto& output_tensor = *ctx->Output(0, output_shape); - ONNXRUNTIME_ENFORCE(CalculateFdmStrides(fdm_output_strides.CpuSpan(), output_dims)); - ONNXRUNTIME_RETURN_IF_ERROR(input_dims.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(input_strides.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(lower_pads.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(upper_pads.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(fdm_output_strides.CopyToGpu()); + ORT_ENFORCE(CalculateFdmStrides(fdm_output_strides.CpuSpan(), output_dims)); + ORT_RETURN_IF_ERROR(input_dims.CopyToGpu()); + ORT_RETURN_IF_ERROR(input_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(lower_pads.CopyToGpu()); + ORT_RETURN_IF_ERROR(upper_pads.CopyToGpu()); + ORT_RETURN_IF_ERROR(fdm_output_strides.CopyToGpu()); PadImpl( dimension_count, diff --git a/onnxruntime/core/providers/cuda/tensor/reshape.cc b/onnxruntime/core/providers/cuda/tensor/reshape.cc index 501336d552ae9..5ceec4cba3283 100644 --- a/onnxruntime/core/providers/cuda/tensor/reshape.cc +++ b/onnxruntime/core/providers/cuda/tensor/reshape.cc @@ -15,7 +15,7 @@ ONNX_OPERATOR_KERNEL_EX( .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("shape", DataTypeImpl::GetTensorType()) .Alias(0, 0) - .InputMemoryType(1), + .InputMemoryType(1), Reshape); ONNX_OPERATOR_VERSIONED_KERNEL_EX( diff --git a/onnxruntime/core/providers/cuda/tensor/reshape.h b/onnxruntime/core/providers/cuda/tensor/reshape.h index 56169f027a98c..8fdbf44da82f4 100644 --- a/onnxruntime/core/providers/cuda/tensor/reshape.h +++ b/onnxruntime/core/providers/cuda/tensor/reshape.h @@ -20,7 +20,7 @@ class Reshape final : public CudaKernel { Status ComputeInternal(OpKernelContext* context) const override { // Copy the second input tensor into the shape vector const Tensor* shapeTensor = context->Input(1); - ONNXRUNTIME_ENFORCE(shapeTensor->Shape().NumDimensions() == 1, + 
ORT_ENFORCE(shapeTensor->Shape().NumDimensions() == 1, "A shape tensor must be a vector tensor."); size_t nDims = static_cast(shapeTensor->Shape()[0]); const int64_t* data = shapeTensor->template Data(); @@ -49,7 +49,7 @@ class Reshape_1 final : public CudaKernel { public: Reshape_1(const OpKernelInfo& info) : CudaKernel(info) { Status status = info.GetAttrs("shape", shape_); - ONNXRUNTIME_ENFORCE(status.IsOK(), "Attribute shape is not set."); + ORT_ENFORCE(status.IsOK(), "Attribute shape is not set."); } Status ComputeInternal(OpKernelContext* context) const override { diff --git a/onnxruntime/core/providers/cuda/tensor/shape_op.cc b/onnxruntime/core/providers/cuda/tensor/shape_op.cc index b0608dd364cd3..e9ccd80764307 100644 --- a/onnxruntime/core/providers/cuda/tensor/shape_op.cc +++ b/onnxruntime/core/providers/cuda/tensor/shape_op.cc @@ -23,7 +23,7 @@ ONNX_OPERATOR_KERNEL_EX( 1, kCudaExecutionProvider, KernelDefBuilder() - .OutputMemoryType(0) + .OutputMemoryType(0) .TypeConstraint("T", shapeOpTypeConstraints) .TypeConstraint("T1", DataTypeImpl::GetTensorType()), Shape); diff --git a/onnxruntime/core/providers/cuda/tensor/slice.cc b/onnxruntime/core/providers/cuda/tensor/slice.cc index 59f4f5f796261..f1e4e0730a51c 100644 --- a/onnxruntime/core/providers/cuda/tensor/slice.cc +++ b/onnxruntime/core/providers/cuda/tensor/slice.cc @@ -18,7 +18,7 @@ ONNX_OPERATOR_KERNEL_EX( Status Slice::ComputeInternal(OpKernelContext* ctx) const { auto input_tensor = ctx->Input(0); - ONNXRUNTIME_ENFORCE(nullptr != input_tensor); + ORT_ENFORCE(nullptr != input_tensor); auto& input_dimensions = input_tensor->Shape().GetDims(); // Initialize the starts & ends to the actual tensor shape @@ -26,7 +26,7 @@ Status Slice::ComputeInternal(OpKernelContext* ctx) const { std::vector starts(dimension_count, 0); std::vector output_dims(input_dimensions); - ONNXRUNTIME_RETURN_IF_ERROR(PrepareForCompute(dimension_count, input_dimensions, starts, output_dims)); + ORT_RETURN_IF_ERROR(PrepareForCompute(dimension_count, input_dimensions, starts, output_dims)); TensorShape output_shape(output_dims); auto output_tensor = ctx->Output(0, output_shape); @@ -43,7 +43,7 @@ Status Slice::ComputeInternal(OpKernelContext* ctx) const { starts_buffer.CopyToGpu(); CudaAsyncBuffer input_strides(this, device_id, dimension_count); - ONNXRUNTIME_ENFORCE(TensorPitches::Calculate(input_strides.CpuSpan(), input_dimensions)); + ORT_ENFORCE(TensorPitches::Calculate(input_strides.CpuSpan(), input_dimensions)); input_strides.CopyToGpu(); TensorPitches output_pitches(output_dims); @@ -57,7 +57,7 @@ Status Slice::ComputeInternal(OpKernelContext* ctx) const { size_t element_size = input_tensor->DataType()->Size(); - ONNXRUNTIME_RETURN_IF_ERROR(SliceImpl(element_size, + ORT_RETURN_IF_ERROR(SliceImpl(element_size, gsl::narrow_cast(dimension_count), starts_buffer.GpuPtr(), input_strides.GpuPtr(), diff --git a/onnxruntime/core/providers/cuda/tensor/slice_impl.cu b/onnxruntime/core/providers/cuda/tensor/slice_impl.cu index 712148c08c7e2..1cceac8895cde 100644 --- a/onnxruntime/core/providers/cuda/tensor/slice_impl.cu +++ b/onnxruntime/core/providers/cuda/tensor/slice_impl.cu @@ -71,7 +71,7 @@ Status SliceImpl(const size_t element_size, (CUDA_LONG)N); break; default: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Slice operator"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Slice operator"); } return Status::OK(); diff --git a/onnxruntime/core/providers/cuda/tensor/tile.cc 
b/onnxruntime/core/providers/cuda/tensor/tile.cc index 39d279ea1cef8..263a789bce190 100644 --- a/onnxruntime/core/providers/cuda/tensor/tile.cc +++ b/onnxruntime/core/providers/cuda/tensor/tile.cc @@ -16,7 +16,7 @@ namespace cuda { T, \ kCudaExecutionProvider, \ KernelDefBuilder() \ - .InputMemoryType(1) \ + .InputMemoryType(1) \ .TypeConstraint("T", DataTypeImpl::GetTensorType()), \ Tile); @@ -47,16 +47,16 @@ Status Tile::ComputeInternal(OpKernelContext* ctx) const { CudaAsyncBuffer fdm_input_shape(this, device_id, rank); CudaAsyncBuffer fdm_output_strides(this, device_id, rank); - ONNXRUNTIME_ENFORCE(TensorPitches::Calculate(input_strides.CpuSpan(), input_shape)); - ONNXRUNTIME_ENFORCE(CalculateFdmStrides(fdm_output_strides.CpuSpan(), output_dims)); + ORT_ENFORCE(TensorPitches::Calculate(input_strides.CpuSpan(), input_shape)); + ORT_ENFORCE(CalculateFdmStrides(fdm_output_strides.CpuSpan(), output_dims)); auto fdm_input_shape_span = fdm_input_shape.CpuSpan(); for (size_t i = 0; i < input_shape.size(); ++i) fdm_input_shape_span[i] = fast_divmod(gsl::narrow_cast(input_shape[i])); - ONNXRUNTIME_RETURN_IF_ERROR(fdm_input_shape.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(input_strides.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(fdm_output_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(fdm_input_shape.CopyToGpu()); + ORT_RETURN_IF_ERROR(input_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(fdm_output_strides.CopyToGpu()); TileImpl( rank, diff --git a/onnxruntime/core/providers/cuda/tensor/transpose.cc b/onnxruntime/core/providers/cuda/tensor/transpose.cc index 3255718a1a3d3..670e4f2843f53 100644 --- a/onnxruntime/core/providers/cuda/tensor/transpose.cc +++ b/onnxruntime/core/providers/cuda/tensor/transpose.cc @@ -37,12 +37,12 @@ Status Transpose::ComputeInternal(OpKernelContext* ctx) const { CudaAsyncBuffer input_strides(this, device_id, rank); CudaAsyncBuffer perm(this, device_id, *p_perm); CudaAsyncBuffer fdm_output_strides(this, device_id, rank); - ONNXRUNTIME_ENFORCE(TensorPitches::Calculate(input_strides.CpuSpan(), input_dims)); - ONNXRUNTIME_ENFORCE(CalculateFdmStrides(fdm_output_strides.CpuSpan(), output_dims)); + ORT_ENFORCE(TensorPitches::Calculate(input_strides.CpuSpan(), input_dims)); + ORT_ENFORCE(CalculateFdmStrides(fdm_output_strides.CpuSpan(), output_dims)); - ONNXRUNTIME_RETURN_IF_ERROR(input_strides.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(perm.CopyToGpu()); - ONNXRUNTIME_RETURN_IF_ERROR(fdm_output_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(input_strides.CopyToGpu()); + ORT_RETURN_IF_ERROR(perm.CopyToGpu()); + ORT_RETURN_IF_ERROR(fdm_output_strides.CopyToGpu()); TransposeImpl( rank, diff --git a/onnxruntime/core/providers/cuda/tensor/unsqueeze.cc b/onnxruntime/core/providers/cuda/tensor/unsqueeze.cc index 3312d57d17436..3da89a7dfe485 100644 --- a/onnxruntime/core/providers/cuda/tensor/unsqueeze.cc +++ b/onnxruntime/core/providers/cuda/tensor/unsqueeze.cc @@ -18,7 +18,7 @@ ONNX_OPERATOR_KERNEL_EX( Status Unsqueeze::ComputeInternal(OpKernelContext* ctx) const { Prepare p; - ONNXRUNTIME_RETURN_IF_ERROR(PrepareCompute(ctx, p)); + ORT_RETURN_IF_ERROR(PrepareCompute(ctx, p)); const void* input = p.input_tensor->DataRaw(); void* output = p.output_tensor->MutableDataRaw(); diff --git a/onnxruntime/core/providers/cuda/tensor/upsample.cc b/onnxruntime/core/providers/cuda/tensor/upsample.cc index f7312ef9716de..11dcec7a9f7b3 100644 --- a/onnxruntime/core/providers/cuda/tensor/upsample.cc +++ b/onnxruntime/core/providers/cuda/tensor/upsample.cc @@ -29,7 +29,7 @@ REGISTER_KERNEL_TYPED(int32_t) 
template Status Upsample::ComputeInternal(OpKernelContext* context) const { const Tensor* X = context->Input(0); - ONNXRUNTIME_ENFORCE(nullptr != X); + ORT_ENFORCE(nullptr != X); const std::vector& X_dims = X->Shape().GetDims(); auto rank = X_dims.size(); if (rank == 0) diff --git a/onnxruntime/core/providers/mkldnn/math/gemm.cc b/onnxruntime/core/providers/mkldnn/math/gemm.cc index c2648b159c189..4ea99249404a9 100644 --- a/onnxruntime/core/providers/mkldnn/math/gemm.cc +++ b/onnxruntime/core/providers/mkldnn/math/gemm.cc @@ -89,7 +89,7 @@ Status Gemm::Compute(OpKernelContext* ctx) const { if (status == mkldnn_success) { return Status::OK(); } else { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "mkldnn_sgemm failed with status: ", status); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "mkldnn_sgemm failed with status: ", status); } } diff --git a/onnxruntime/core/providers/mkldnn/math/gemm.h b/onnxruntime/core/providers/mkldnn/math/gemm.h index d5d23b7bbd0ca..7d6d49fc1c615 100644 --- a/onnxruntime/core/providers/mkldnn/math/gemm.h +++ b/onnxruntime/core/providers/mkldnn/math/gemm.h @@ -11,14 +11,14 @@ class Gemm final : public OpKernel { public: Gemm(const OpKernelInfo& info) : OpKernel(info) { int64_t temp; - ONNXRUNTIME_ENFORCE(info.GetAttr("transA", &temp).IsOK()); + ORT_ENFORCE(info.GetAttr("transA", &temp).IsOK()); trans_A_ = (temp != 0); - ONNXRUNTIME_ENFORCE(info.GetAttr("transB", &temp).IsOK()); + ORT_ENFORCE(info.GetAttr("transB", &temp).IsOK()); trans_B_ = (temp != 0); - ONNXRUNTIME_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); - ONNXRUNTIME_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); + ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK()); + ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK()); } Status Compute(OpKernelContext* context) const override; diff --git a/onnxruntime/core/providers/mkldnn/mkldnn_allocator.cc b/onnxruntime/core/providers/mkldnn/mkldnn_allocator.cc index a7db234a33ebb..a58af56b4453c 100644 --- a/onnxruntime/core/providers/mkldnn/mkldnn_allocator.cc +++ b/onnxruntime/core/providers/mkldnn/mkldnn_allocator.cc @@ -6,13 +6,13 @@ namespace onnxruntime { -const ONNXRuntimeAllocatorInfo& MKLDNNAllocator::Info() const { - static constexpr ONNXRuntimeAllocatorInfo mkl_allocator_info(MKLDNN, ONNXRuntimeAllocatorType::ONNXRuntimeDeviceAllocator, 0, ONNXRuntimeMemTypeDefault); +const OrtAllocatorInfo& MKLDNNAllocator::Info() const { + static constexpr OrtAllocatorInfo mkl_allocator_info(MKLDNN, OrtAllocatorType::OrtDeviceAllocator, 0, OrtMemTypeDefault); return mkl_allocator_info; } -const ONNXRuntimeAllocatorInfo& MKLDNNCPUAllocator::Info() const { - static constexpr ONNXRuntimeAllocatorInfo mkl_cpu_allocator_info(MKLDNN_CPU, ONNXRuntimeAllocatorType::ONNXRuntimeDeviceAllocator, 0, ONNXRuntimeMemTypeCPUOutput); +const OrtAllocatorInfo& MKLDNNCPUAllocator::Info() const { + static constexpr OrtAllocatorInfo mkl_cpu_allocator_info(MKLDNN_CPU, OrtAllocatorType::OrtDeviceAllocator, 0, OrtMemTypeCPUOutput); return mkl_cpu_allocator_info; } } // namespace onnxruntime diff --git a/onnxruntime/core/providers/mkldnn/mkldnn_allocator.h b/onnxruntime/core/providers/mkldnn/mkldnn_allocator.h index afcc7b55397ae..ac888e83217b9 100644 --- a/onnxruntime/core/providers/mkldnn/mkldnn_allocator.h +++ b/onnxruntime/core/providers/mkldnn/mkldnn_allocator.h @@ -11,10 +11,10 @@ constexpr const char* MKLDNN_CPU = "MklDnnCpu"; class MKLDNNAllocator : public CPUAllocator { public: - const ONNXRuntimeAllocatorInfo& Info() const override; + const OrtAllocatorInfo& Info() const override; }; 
class MKLDNNCPUAllocator : public CPUAllocator { public: - const ONNXRuntimeAllocatorInfo& Info() const override; + const OrtAllocatorInfo& Info() const override; }; } // namespace onnxruntime diff --git a/onnxruntime/core/providers/mkldnn/mkldnn_common.h b/onnxruntime/core/providers/mkldnn/mkldnn_common.h index d103791a2a2b1..968af203cfcc3 100644 --- a/onnxruntime/core/providers/mkldnn/mkldnn_common.h +++ b/onnxruntime/core/providers/mkldnn/mkldnn_common.h @@ -47,7 +47,7 @@ class PrimitivePool { auto& map = PrimitivePool::GetMap(); auto iter = map.find(key); // We should not find a primitive already using this key. - ONNXRUNTIME_ENFORCE(iter == map.end(), "duplicate key: " + key); + ORT_ENFORCE(iter == map.end(), "duplicate key: " + key); map.insert(std::make_pair(key, std::move(primitive))); } diff --git a/onnxruntime/core/providers/mkldnn/mkldnn_execution_provider.cc b/onnxruntime/core/providers/mkldnn/mkldnn_execution_provider.cc index f98ce0c68bd48..d8889419a7251 100644 --- a/onnxruntime/core/providers/mkldnn/mkldnn_execution_provider.cc +++ b/onnxruntime/core/providers/mkldnn/mkldnn_execution_provider.cc @@ -17,7 +17,7 @@ ONNX_OPERATOR_KERNEL_EX( kOnnxDomain, 1, kMklDnnExecutionProvider, - KernelDefBuilder().InputMemoryType(0).TypeConstraint("T", DataTypeImpl::AllTensorTypes()), + KernelDefBuilder().InputMemoryType(0).TypeConstraint("T", DataTypeImpl::AllTensorTypes()), Memcpy); ONNX_OPERATOR_KERNEL_EX( @@ -25,17 +25,17 @@ ONNX_OPERATOR_KERNEL_EX( kOnnxDomain, 1, kMklDnnExecutionProvider, - KernelDefBuilder().OutputMemoryType(0).TypeConstraint("T", DataTypeImpl::AllTensorTypes()), + KernelDefBuilder().OutputMemoryType(0).TypeConstraint("T", DataTypeImpl::AllTensorTypes()), Memcpy); } // namespace mkl_dnn MKLDNNExecutionProvider::MKLDNNExecutionProvider(const MKLDNNExecutionProviderInfo& /*info*/) { - DeviceAllocatorRegistrationInfo default_allocator_info({ONNXRuntimeMemTypeDefault, + DeviceAllocatorRegistrationInfo default_allocator_info({OrtMemTypeDefault, [](int) { return std::make_unique(); }, std::numeric_limits::max()}); InsertAllocator(CreateAllocator(default_allocator_info)); - DeviceAllocatorRegistrationInfo cpu_allocator_info({ONNXRuntimeMemTypeCPUOutput, + DeviceAllocatorRegistrationInfo cpu_allocator_info({OrtMemTypeCPUOutput, [](int) { return std::make_unique(); }, std::numeric_limits::max()}); InsertAllocator(CreateAllocator(cpu_allocator_info)); } @@ -48,7 +48,7 @@ Status MKLDNNExecutionProvider::CopyTensor(const Tensor& src, Tensor& dst) const if (!(strcmp(src.Location().name, MKLDNN) == 0 && strcmp(dst.Location().name, CPU) == 0) && !(strcmp(src.Location().name, CPU) == 0 && strcmp(dst.Location().name, MKLDNN) == 0) && !(strcmp(src.Location().name, MKLDNN) == 0 && strcmp(dst.Location().name, MKLDNN_CPU) == 0)) { - ONNXRUNTIME_NOT_IMPLEMENTED(src.Location().name, " copy to ", dst.Location().name, " is not implemented"); + ORT_NOT_IMPLEMENTED(src.Location().name, " copy to ", dst.Location().name, " is not implemented"); } // Todo: Copy for now. May optimize later to avoid copy. 
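Alongside the macro rename, the allocator types and memory-type enums drop the ONNXRuntime prefix as well: OrtAllocatorInfo, OrtAllocatorType::OrtDeviceAllocator, OrtMemTypeDefault, OrtMemTypeCPUOutput. The MKL-DNN provider's constructor in the hunk above then registers one allocator per memory type; a sketch of that registration pattern with the template arguments spelled out, assuming the MKLDNNAllocator and MKLDNNCPUAllocator classes declared in mkldnn_allocator.h above:

    // Sketch of the allocator registration shown in the preceding hunk (not new behavior).
    MKLDNNExecutionProvider::MKLDNNExecutionProvider(const MKLDNNExecutionProviderInfo& /*info*/) {
      DeviceAllocatorRegistrationInfo default_allocator_info(
          {OrtMemTypeDefault,
           [](int) { return std::make_unique<MKLDNNAllocator>(); },
           std::numeric_limits<size_t>::max()});
      InsertAllocator(CreateAllocator(default_allocator_info));

      DeviceAllocatorRegistrationInfo cpu_allocator_info(
          {OrtMemTypeCPUOutput,
           [](int) { return std::make_unique<MKLDNNCPUAllocator>(); },
           std::numeric_limits<size_t>::max()});
      InsertAllocator(CreateAllocator(cpu_allocator_info));
    }
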
diff --git a/onnxruntime/core/providers/mkldnn/mkldnn_provider_factory.cc b/onnxruntime/core/providers/mkldnn/mkldnn_provider_factory.cc index 7cb46db6f69eb..d9d9a543462b6 100644 --- a/onnxruntime/core/providers/mkldnn/mkldnn_provider_factory.cc +++ b/onnxruntime/core/providers/mkldnn/mkldnn_provider_factory.cc @@ -9,35 +9,35 @@ using namespace onnxruntime; namespace { struct MkldnnProviderFactory { - const ONNXRuntimeProviderFactoryInterface* const cls; + const OrtProviderFactoryInterface* const cls; std::atomic_int ref_count; bool create_arena; MkldnnProviderFactory(); }; -ONNXStatus* ONNXRUNTIME_API_CALL CreateMkldnn(void* this_, ONNXRuntimeProvider** out) { +ONNXStatus* ORT_API_CALL CreateMkldnn(void* this_, OrtProvider** out) { MKLDNNExecutionProviderInfo info; MkldnnProviderFactory* this_ptr = (MkldnnProviderFactory*)this_; info.create_arena = this_ptr->create_arena; MKLDNNExecutionProvider* ret = new MKLDNNExecutionProvider(info); - *out = (ONNXRuntimeProvider*)ret; + *out = (OrtProvider*)ret; return nullptr; } -uint32_t ONNXRUNTIME_API_CALL ReleaseMkldnn(void* this_) { +uint32_t ORT_API_CALL ReleaseMkldnn(void* this_) { MkldnnProviderFactory* this_ptr = (MkldnnProviderFactory*)this_; if (--this_ptr->ref_count == 0) delete this_ptr; return 0; } -uint32_t ONNXRUNTIME_API_CALL AddRefMkldnn(void* this_) { +uint32_t ORT_API_CALL AddRefMkldnn(void* this_) { MkldnnProviderFactory* this_ptr = (MkldnnProviderFactory*)this_; ++this_ptr->ref_count; return 0; } -constexpr ONNXRuntimeProviderFactoryInterface mkl_cls = { +constexpr OrtProviderFactoryInterface mkl_cls = { {AddRefMkldnn, ReleaseMkldnn}, CreateMkldnn, @@ -46,9 +46,9 @@ constexpr ONNXRuntimeProviderFactoryInterface mkl_cls = { MkldnnProviderFactory::MkldnnProviderFactory() : cls(&mkl_cls), ref_count(1), create_arena(true) {} } // namespace -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateMkldnnExecutionProviderFactory, int use_arena, _Out_ ONNXRuntimeProviderFactoryInterface*** out) { +ORT_API_STATUS_IMPL(OrtCreateMkldnnExecutionProviderFactory, int use_arena, _Out_ OrtProviderFactoryInterface*** out) { MkldnnProviderFactory* ret = new MkldnnProviderFactory(); ret->create_arena = (use_arena != 0); - *out = (ONNXRuntimeProviderFactoryInterface**)ret; + *out = (OrtProviderFactoryInterface**)ret; return nullptr; } diff --git a/onnxruntime/core/providers/mkldnn/nn/conv.cc b/onnxruntime/core/providers/mkldnn/nn/conv.cc index ea0656e900967..355562bf5adcd 100644 --- a/onnxruntime/core/providers/mkldnn/nn/conv.cc +++ b/onnxruntime/core/providers/mkldnn/nn/conv.cc @@ -265,7 +265,7 @@ Status Conv::Compute(OpKernelContext* context) const { const int64_t M = W->Shape()[0]; const int group_mkl = static_cast(onnxruntime::ConvBase::group_); - ONNXRUNTIME_RETURN_IF_ERROR(onnxruntime::ConvBase::ValidateInputShape(X, W)); + ORT_RETURN_IF_ERROR(onnxruntime::ConvBase::ValidateInputShape(X, W)); std::vector kernel_shape = onnxruntime::ConvBase::ComputeKernelShape(W->Shape()); const size_t kernel_rank = kernel_shape.size(); @@ -276,14 +276,14 @@ Status Conv::Compute(OpKernelContext* context) const { } if (kernel_rank + 2 != W->Shape().NumDimensions()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape num_dims is not compatible with W num_dims.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape num_dims is not compatible with W num_dims.", " kernel_shape: ", TensorShape(kernel_shape).ToString().c_str(), " W: ", W->Shape().ToString().c_str()); } for (size_t i = 0; i < kernel_rank; ++i) { if (kernel_shape[i] != W->Shape()[i + 2]) { - 
return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape is not compatible with W shape.", + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "kernel_shape is not compatible with W shape.", " kernel_shape: ", TensorShape(kernel_shape).ToString().c_str(), " W: ", W->Shape().ToString().c_str()); } @@ -305,7 +305,7 @@ Status Conv::Compute(OpKernelContext* context) const { std::vector Y_dims; Y_dims.insert(Y_dims.begin(), {N, M}); TensorShape input_shape = X->Shape().Slice(2); - ONNXRUNTIME_RETURN_IF_ERROR(onnxruntime::ConvBase::InferOutputShape(input_shape, kernel_shape, strides, dilations, &pads, &Y_dims)); + ORT_RETURN_IF_ERROR(onnxruntime::ConvBase::InferOutputShape(input_shape, kernel_shape, strides, dilations, &pads, &Y_dims)); Tensor* Y = context->Output(0, TensorShape(Y_dims)); TensorShape output_shape = Y->Shape().Slice(2); @@ -334,7 +334,7 @@ Status Conv::Compute(OpKernelContext* context) const { } AllocatorPtr alloc; - ONNXRUNTIME_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); IAllocatorUniquePtr src_reorder_buffer; IAllocatorUniquePtr filter_reorder_buffer; IAllocatorUniquePtr dst_reorder_buffer; @@ -438,7 +438,7 @@ Status Conv::Compute(OpKernelContext* context) const { } } catch (mkldnn::error& e) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Status: ", e.status, ", message: ", e.message.c_str()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Status: ", e.status, ", message: ", e.message.c_str()); } return Status::OK(); diff --git a/onnxruntime/core/providers/mkldnn/nn/lrn.cc b/onnxruntime/core/providers/mkldnn/nn/lrn.cc index 34da6851dddee..e1a7b6147d1c8 100644 --- a/onnxruntime/core/providers/mkldnn/nn/lrn.cc +++ b/onnxruntime/core/providers/mkldnn/nn/lrn.cc @@ -175,7 +175,7 @@ Status LRN::Compute(OpKernelContext* context) const { const TensorShape& x_shape = X->Shape(); if (x_shape.NumDimensions() != 4) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Support NCHW image only."); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Support NCHW image only."); } const auto& x_dims = x_shape.GetDims(); @@ -185,7 +185,7 @@ Status LRN::Compute(OpKernelContext* context) const { T* dst_data = Y->template MutableData(); AllocatorPtr alloc; - ONNXRUNTIME_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); IAllocatorUniquePtr src_reorder_buffer; IAllocatorUniquePtr dst_reorder_buffer; @@ -232,7 +232,7 @@ Status LRN::Compute(OpKernelContext* context) const { DoReorder(params); } } catch (mkldnn::error& e) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Status: ", e.status, ", message: ", e.message.c_str()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Status: ", e.status, ", message: ", e.message.c_str()); } return Status::OK(); diff --git a/onnxruntime/core/providers/mkldnn/nn/pool.cc b/onnxruntime/core/providers/mkldnn/nn/pool.cc index bda1c301c52b7..460c7c7aef9d1 100644 --- a/onnxruntime/core/providers/mkldnn/nn/pool.cc +++ b/onnxruntime/core/providers/mkldnn/nn/pool.cc @@ -236,7 +236,7 @@ Status Pool::Compute(OpKernelContext* context) const { const auto& x_dims = x_shape.GetDims(); if (x_shape.NumDimensions() < 3) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input dimension cannot be less than 3."); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Input dimension cannot be less than 3."); } if (x_shape.NumDimensions() == 3) { @@ -276,7 +276,7 @@ Status Pool::Compute(OpKernelContext* context) const { mkldnn::memory::dims 
padding_right_mkl(pads.begin() + (pads.size() / 2), pads.end()); AllocatorPtr alloc; - ONNXRUNTIME_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); + ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc)); IAllocatorUniquePtr src_reorder_buffer; IAllocatorUniquePtr dst_reorder_buffer; @@ -327,7 +327,7 @@ Status Pool::Compute(OpKernelContext* context) const { DoReorder(params); } } catch (mkldnn::error& e) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Status: ", e.status, ", message: ", e.message.c_str()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Status: ", e.status, ", message: ", e.message.c_str()); } return Status::OK(); diff --git a/onnxruntime/core/providers/mkldnn/symbols.txt b/onnxruntime/core/providers/mkldnn/symbols.txt index c3e14d5b73889..4cc61114a710f 100644 --- a/onnxruntime/core/providers/mkldnn/symbols.txt +++ b/onnxruntime/core/providers/mkldnn/symbols.txt @@ -1 +1 @@ -ONNXRuntimeCreateMkldnnExecutionProviderFactory +OrtCreateMkldnnExecutionProviderFactory diff --git a/onnxruntime/core/session/CustomOpsLoader.cc b/onnxruntime/core/session/CustomOpsLoader.cc index 5d5dc9c5a848a..981987c8e0fea 100644 --- a/onnxruntime/core/session/CustomOpsLoader.cc +++ b/onnxruntime/core/session/CustomOpsLoader.cc @@ -76,12 +76,12 @@ Status CustomOpsLoader::LoadCustomOps(const std::string& dso_file_path, using GetAllKernelsFn = KernelsContainer* (*)(); using GetAllSchemasFn = SchemasContainer* (*)(); void* lib_handle = nullptr; - ONNXRUNTIME_RETURN_IF_ERROR(Env::Default().LoadDynamicLibrary(dso_file_path, &lib_handle)); + ORT_RETURN_IF_ERROR(Env::Default().LoadDynamicLibrary(dso_file_path, &lib_handle)); dso_name_data_map_[dso_file_path].lib_handle = lib_handle; // get symbol for GetAllKernels void* get_all_kernels_symbol_handle = nullptr; - ONNXRUNTIME_RETURN_IF_ERROR(Env::Default().GetSymbolFromLibrary(lib_handle, + ORT_RETURN_IF_ERROR(Env::Default().GetSymbolFromLibrary(lib_handle, kGetAllKernelsSymbol, &get_all_kernels_symbol_handle)); if (!get_all_kernels_symbol_handle) { @@ -102,12 +102,12 @@ Status CustomOpsLoader::LoadCustomOps(const std::string& dso_file_path, custom_registry = std::make_shared(); for (auto& i : kernels_container->kernels_list) { - ONNXRUNTIME_RETURN_IF_ERROR(custom_registry->RegisterCustomKernel(i)); + ORT_RETURN_IF_ERROR(custom_registry->RegisterCustomKernel(i)); } // get symbol for GetAllSchemas void* get_all_schemas_symbol_handle = nullptr; - ONNXRUNTIME_RETURN_IF_ERROR(Env::Default().GetSymbolFromLibrary(lib_handle, + ORT_RETURN_IF_ERROR(Env::Default().GetSymbolFromLibrary(lib_handle, kGetAllSchemasSymbol, &get_all_schemas_symbol_handle)); @@ -124,7 +124,7 @@ Status CustomOpsLoader::LoadCustomOps(const std::string& dso_file_path, dso_name_data_map_[dso_file_path].schemas_container = schemas_container; // register the schemas if present - ONNXRUNTIME_RETURN_IF_ERROR(custom_registry->RegisterOpSet(schemas_container->schemas_list, + ORT_RETURN_IF_ERROR(custom_registry->RegisterOpSet(schemas_container->schemas_list, schemas_container->domain, schemas_container->baseline_opset_version, schemas_container->opset_version)); diff --git a/onnxruntime/core/session/CustomOpsLoader.h b/onnxruntime/core/session/CustomOpsLoader.h index db9e790cf950c..1794a9d6c6076 100644 --- a/onnxruntime/core/session/CustomOpsLoader.h +++ b/onnxruntime/core/session/CustomOpsLoader.h @@ -32,6 +32,6 @@ class CustomOpsLoader final { }; std::map dso_name_data_map_; - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CustomOpsLoader); + 
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CustomOpsLoader); }; } // namespace onnxruntime diff --git a/onnxruntime/core/session/IOBinding.cc b/onnxruntime/core/session/IOBinding.cc index 1387ab045f014..67a33be805612 100644 --- a/onnxruntime/core/session/IOBinding.cc +++ b/onnxruntime/core/session/IOBinding.cc @@ -17,7 +17,7 @@ common::Status IOBinding::BindInput(const std::string& name, const MLValue& ml_v } MLValue new_mlvalue; - ONNXRUNTIME_RETURN_IF_ERROR(CopyOneInputAcrossDevices(session_state_, name, ml_value, new_mlvalue)); + ORT_RETURN_IF_ERROR(CopyOneInputAcrossDevices(session_state_, name, ml_value, new_mlvalue)); feeds_[name] = new_mlvalue; return Status::OK(); } @@ -27,12 +27,12 @@ static common::Status AllocateHelper(const SessionState& session_state, const MLValue& fetched_mlvalue, MLValue& output_mlvalue) { auto* p_provider = session_state.GetExecutionProviders().Get(provider_type); - ONNXRUNTIME_ENFORCE(p_provider); - auto allocator = p_provider->GetAllocator(id, ONNXRuntimeMemTypeDefault); - ONNXRUNTIME_ENFORCE(allocator != nullptr); + ORT_ENFORCE(p_provider); + auto allocator = p_provider->GetAllocator(id, OrtMemTypeDefault); + ORT_ENFORCE(allocator != nullptr); auto& fetched_tensor = fetched_mlvalue.Get(); void* buffer = allocator->Alloc(fetched_tensor.Size()); - ONNXRUNTIME_ENFORCE(buffer); + ORT_ENFORCE(buffer); std::unique_ptr p_tensor = std::make_unique(fetched_tensor.DataType(), fetched_tensor.Shape(), buffer, @@ -54,7 +54,7 @@ common::Status IOBinding::CopyOneInputAcrossDevices(const SessionState& session_ //TODO: make it configurable const int target_device_id = 0; std::vector node_info_vec; - ONNXRUNTIME_RETURN_IF_ERROR(session_state.GetInputNodeInfo(input_name, node_info_vec)); + ORT_RETURN_IF_ERROR(session_state.GetInputNodeInfo(input_name, node_info_vec)); for (auto& node_info : node_info_vec) { size_t index = node_info.index; @@ -77,33 +77,33 @@ common::Status IOBinding::CopyOneInputAcrossDevices(const SessionState& session_ auto* p_input_provider = exec_providers.Get(input_tensor_loc); if (!p_input_provider) { p_input_provider = exec_providers.Get(onnxruntime::kCpuExecutionProvider); - ONNXRUNTIME_ENFORCE(p_input_provider); + ORT_ENFORCE(p_input_provider); } auto input_provider_type = p_input_provider->Type(); - if (input_provider_type == required_provider_type && input_tensor_loc.mem_type == ONNXRuntimeMemTypeDefault) { + if (input_provider_type == required_provider_type && input_tensor_loc.mem_type == OrtMemTypeDefault) { new_mlvalue = orig_mlvalue; return Status::OK(); } //If node require input on cpu and input tensor is allocated with pinned memory allocator, don't do copy - if (node_input_on_cpu && (input_tensor_loc.mem_type == ONNXRuntimeMemTypeCPU || input_tensor_loc.mem_type == ONNXRuntimeMemTypeCPUOutput)) { + if (node_input_on_cpu && (input_tensor_loc.mem_type == OrtMemTypeCPU || input_tensor_loc.mem_type == OrtMemTypeCPUOutput)) { new_mlvalue = orig_mlvalue; return Status::OK(); } auto* node_provider = exec_providers.Get(required_provider_type); - ONNXRUNTIME_ENFORCE(node_provider); - ONNXRUNTIME_RETURN_IF_ERROR(AllocateHelper(session_state, target_device_id, required_provider_type, orig_mlvalue, new_mlvalue)); + ORT_ENFORCE(node_provider); + ORT_RETURN_IF_ERROR(AllocateHelper(session_state, target_device_id, required_provider_type, orig_mlvalue, new_mlvalue)); auto* new_tensor = new_mlvalue.GetMutable(); auto* node_exec_provider = exec_providers.Get(required_provider_type); - ONNXRUNTIME_ENFORCE(node_exec_provider); + ORT_ENFORCE(node_exec_provider); 
// our CPU exec provider doesn't support copy from GPU->CPU if (required_provider_type != onnxruntime::kCpuExecutionProvider) { - ONNXRUNTIME_RETURN_IF_ERROR(node_exec_provider->CopyTensor(input_tensor, *new_tensor)); + ORT_RETURN_IF_ERROR(node_exec_provider->CopyTensor(input_tensor, *new_tensor)); } else { - ONNXRUNTIME_RETURN_IF_ERROR(p_input_provider->CopyTensor(input_tensor, *new_tensor)); + ORT_RETURN_IF_ERROR(p_input_provider->CopyTensor(input_tensor, *new_tensor)); } } @@ -126,18 +126,18 @@ static common::Status SyncProviders(const SessionState::NameNodeInfoMapType& nod continue; } - ONNXRUNTIME_RETURN_IF_ERROR(p_provider->Sync()); + ORT_RETURN_IF_ERROR(p_provider->Sync()); } return Status::OK(); } common::Status IOBinding::SynchronizeInputs() { - ONNXRUNTIME_RETURN_IF_ERROR(SyncProviders(session_state_.GetInputNodeInfoMap(), session_state_)); + ORT_RETURN_IF_ERROR(SyncProviders(session_state_.GetInputNodeInfoMap(), session_state_)); return Status::OK(); } common::Status IOBinding::SynchronizeOutputs() { - ONNXRUNTIME_RETURN_IF_ERROR(SyncProviders(session_state_.GetOutputNodeInfoMap(), session_state_)); + ORT_RETURN_IF_ERROR(SyncProviders(session_state_.GetOutputNodeInfoMap(), session_state_)); return Status::OK(); } @@ -176,15 +176,15 @@ const std::unordered_map& IOBinding::GetInputs() const { AllocatorPtr IOBinding::GetCPUAllocator(int id, onnxruntime::ProviderType provider_type) const { auto& exec_providers = session_state_.GetExecutionProviders(); auto* p_provider = exec_providers.Get(provider_type); - ONNXRUNTIME_ENFORCE(p_provider); - auto allocator = p_provider->GetAllocator(id, ONNXRuntimeMemTypeCPU); + ORT_ENFORCE(p_provider); + auto allocator = p_provider->GetAllocator(id, OrtMemTypeCPU); // if the provider does not implement CPU allocator, fall back to CPU if (allocator) return allocator; auto* cpu_provider = exec_providers.Get(onnxruntime::kCpuExecutionProvider); - return cpu_provider->GetAllocator(0, ONNXRuntimeMemTypeDefault); + return cpu_provider->GetAllocator(0, OrtMemTypeDefault); } } // namespace onnxruntime diff --git a/onnxruntime/core/session/IOBinding.h b/onnxruntime/core/session/IOBinding.h index c927a76b7a707..06e1c2974891f 100644 --- a/onnxruntime/core/session/IOBinding.h +++ b/onnxruntime/core/session/IOBinding.h @@ -43,7 +43,7 @@ class IOBinding { * If the input mlvalue is not at the desired location (specified by the execution provider), this will * copy it to the desired location. This copy may or may not be async. It depends on the exec provider. * If the input mlvalue is not at the desired location, it should be preallocated - * If the input mlvalue isn't preallocated, it should have memtype of ONNXRuntimeMemTypeDefault + * If the input mlvalue isn't preallocated, it should have memtype of OrtMemTypeDefault * For copying it leverages IExecutionProvider::CopyTensor(). 
*/ common::Status BindInput(const std::string& name, const MLValue& ml_value); @@ -91,6 +91,6 @@ class IOBinding { const MLValue& orig_mlvalue, MLValue& new_mlvalue); - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(IOBinding); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(IOBinding); }; } // namespace onnxruntime diff --git a/onnxruntime/core/session/abi_session_options.cc b/onnxruntime/core/session/abi_session_options.cc index 07e6204bf9711..d7484f0f5e225 100644 --- a/onnxruntime/core/session/abi_session_options.cc +++ b/onnxruntime/core/session/abi_session_options.cc @@ -7,53 +7,53 @@ #include "core/session/inference_session.h" #include "abi_session_options_impl.h" -ONNXRuntimeSessionOptions::~ONNXRuntimeSessionOptions() { +OrtSessionOptions::~OrtSessionOptions() { assert(ref_count == 0); - for (ONNXRuntimeProviderFactoryInterface** p : provider_factories) { - ONNXRuntimeReleaseObject(p); + for (OrtProviderFactoryInterface** p : provider_factories) { + OrtReleaseObject(p); } } -ONNXRuntimeSessionOptions& ONNXRuntimeSessionOptions::operator=(const ONNXRuntimeSessionOptions&) { +OrtSessionOptions& OrtSessionOptions::operator=(const OrtSessionOptions&) { throw std::runtime_error("not implemented"); } -ONNXRuntimeSessionOptions::ONNXRuntimeSessionOptions(const ONNXRuntimeSessionOptions& other) +OrtSessionOptions::OrtSessionOptions(const OrtSessionOptions& other) : value(other.value), custom_op_paths(other.custom_op_paths), provider_factories(other.provider_factories) { - for (ONNXRuntimeProviderFactoryInterface** p : other.provider_factories) { - ONNXRuntimeAddRefToObject(p); + for (OrtProviderFactoryInterface** p : other.provider_factories) { + OrtAddRefToObject(p); } } -ONNXRUNTIME_API(ONNXRuntimeSessionOptions*, ONNXRuntimeCreateSessionOptions) { - std::unique_ptr options = std::make_unique(); +ORT_API(OrtSessionOptions*, OrtCreateSessionOptions) { + std::unique_ptr options = std::make_unique(); return options.release(); } -ONNXRUNTIME_API(ONNXRuntimeSessionOptions*, ONNXRuntimeCloneSessionOptions, ONNXRuntimeSessionOptions* input) { +ORT_API(OrtSessionOptions*, OrtCloneSessionOptions, OrtSessionOptions* input) { try { - return new ONNXRuntimeSessionOptions(*input); + return new OrtSessionOptions(*input); } catch (std::exception&) { return nullptr; } } -ONNXRUNTIME_API(void, ONNXRuntimeSessionOptionsAppendExecutionProvider, _In_ ONNXRuntimeSessionOptions* options, _In_ ONNXRuntimeProviderFactoryInterface** f) { - ONNXRuntimeAddRefToObject(f); +ORT_API(void, OrtSessionOptionsAppendExecutionProvider, _In_ OrtSessionOptions* options, _In_ OrtProviderFactoryInterface** f) { + OrtAddRefToObject(f); options->provider_factories.push_back(f); } -ONNXRUNTIME_API(void, ONNXRuntimeEnableSequentialExecution, _In_ ONNXRuntimeSessionOptions* options) { +ORT_API(void, OrtEnableSequentialExecution, _In_ OrtSessionOptions* options) { options->value.enable_sequential_execution = true; } -ONNXRUNTIME_API(void, ONNXRuntimeDisableSequentialExecution, _In_ ONNXRuntimeSessionOptions* options) { +ORT_API(void, OrtDisableSequentialExecution, _In_ OrtSessionOptions* options) { options->value.enable_sequential_execution = false; } // enable profiling for this session. 
-ONNXRUNTIME_API(void, ONNXRuntimeEnableProfiling, _In_ ONNXRuntimeSessionOptions* options, _In_ const char* profile_file_prefix) { +ORT_API(void, OrtEnableProfiling, _In_ OrtSessionOptions* options, _In_ const char* profile_file_prefix) { options->value.enable_profiling = true; options->value.profile_file_prefix = profile_file_prefix; } -ONNXRUNTIME_API(void, ONNXRuntimeDisableProfiling, _In_ ONNXRuntimeSessionOptions* options) { +ORT_API(void, OrtDisableProfiling, _In_ OrtSessionOptions* options) { options->value.enable_profiling = false; options->value.profile_file_prefix.clear(); } @@ -62,41 +62,41 @@ ONNXRUNTIME_API(void, ONNXRuntimeDisableProfiling, _In_ ONNXRuntimeSessionOption // The idea is if the input shapes are the same, we could trace the internal memory allocation // and generate a memory pattern for future request. So next time we could just do one allocation // with a big chunk for all the internal memory allocation. -ONNXRUNTIME_API(void, ONNXRuntimeEnableMemPattern, _In_ ONNXRuntimeSessionOptions* options) { +ORT_API(void, OrtEnableMemPattern, _In_ OrtSessionOptions* options) { options->value.enable_mem_pattern = true; } -ONNXRUNTIME_API(void, ONNXRuntimeDisableMemPattern, _In_ ONNXRuntimeSessionOptions* options) { +ORT_API(void, OrtDisableMemPattern, _In_ OrtSessionOptions* options) { options->value.enable_mem_pattern = false; } // enable the memory arena on CPU // Arena may pre-allocate memory for future usage. // set this option to false if you don't want it. -ONNXRUNTIME_API(void, ONNXRuntimeEnableCpuMemArena, _In_ ONNXRuntimeSessionOptions* options) { +ORT_API(void, OrtEnableCpuMemArena, _In_ OrtSessionOptions* options) { options->value.enable_cpu_mem_arena = true; } -ONNXRUNTIME_API(void, ONNXRuntimeDisableCpuMemArena, _In_ ONNXRuntimeSessionOptions* options) { +ORT_API(void, OrtDisableCpuMemArena, _In_ OrtSessionOptions* options) { options->value.enable_cpu_mem_arena = false; } ///< logger id to use for session output -ONNXRUNTIME_API(void, ONNXRuntimeSetSessionLogId, _In_ ONNXRuntimeSessionOptions* options, const char* logid) { +ORT_API(void, OrtSetSessionLogId, _In_ OrtSessionOptions* options, const char* logid) { options->value.session_logid = logid; } ///< applies to session load, initialization, etc -ONNXRUNTIME_API(void, ONNXRuntimeSetSessionLogVerbosityLevel, _In_ ONNXRuntimeSessionOptions* options, uint32_t session_log_verbosity_level) { +ORT_API(void, OrtSetSessionLogVerbosityLevel, _In_ OrtSessionOptions* options, uint32_t session_log_verbosity_level) { options->value.session_log_verbosity_level = session_log_verbosity_level; } ///How many threads in the session thread pool. 
-ONNXRUNTIME_API(int, ONNXRuntimeSetSessionThreadPoolSize, _In_ ONNXRuntimeSessionOptions* options, int session_thread_pool_size) { +ORT_API(int, OrtSetSessionThreadPoolSize, _In_ OrtSessionOptions* options, int session_thread_pool_size) { if (session_thread_pool_size <= 0) return -1; options->value.session_thread_pool_size = session_thread_pool_size; return 0; } -ONNXRUNTIME_API(void, ONNXRuntimeAddCustomOp, _In_ ONNXRuntimeSessionOptions* options, const char* custom_op_path) { +ORT_API(void, OrtAddCustomOp, _In_ OrtSessionOptions* options, const char* custom_op_path) { options->custom_op_paths.emplace_back(custom_op_path); } diff --git a/onnxruntime/core/session/abi_session_options_impl.h b/onnxruntime/core/session/abi_session_options_impl.h index f26cbaed569d4..3d1aa52f01aac 100644 --- a/onnxruntime/core/session/abi_session_options_impl.h +++ b/onnxruntime/core/session/abi_session_options_impl.h @@ -10,12 +10,12 @@ #include "core/session/inference_session.h" #include "core/session/onnxruntime_c_api.h" -struct ONNXRuntimeSessionOptions : public onnxruntime::ObjectBase { +struct OrtSessionOptions : public onnxruntime::ObjectBase { onnxruntime::SessionOptions value; std::vector custom_op_paths; - std::vector provider_factories; - ONNXRuntimeSessionOptions() = default; - ~ONNXRuntimeSessionOptions(); - ONNXRuntimeSessionOptions(const ONNXRuntimeSessionOptions& other); - ONNXRuntimeSessionOptions& operator=(const ONNXRuntimeSessionOptions& other); + std::vector provider_factories; + OrtSessionOptions() = default; + ~OrtSessionOptions(); + OrtSessionOptions(const OrtSessionOptions& other); + OrtSessionOptions& operator=(const OrtSessionOptions& other); }; diff --git a/onnxruntime/core/session/allocator_impl.h b/onnxruntime/core/session/allocator_impl.h index e4ec3de138cc7..cd28f5b416b22 100644 --- a/onnxruntime/core/session/allocator_impl.h +++ b/onnxruntime/core/session/allocator_impl.h @@ -8,7 +8,7 @@ namespace onnxruntime { class AllocatorWrapper : public IAllocator { public: - AllocatorWrapper(ONNXRuntimeAllocator* impl) : impl_(impl) { + AllocatorWrapper(OrtAllocator* impl) : impl_(impl) { (*impl)->parent.AddRef(impl); } ~AllocatorWrapper() { @@ -20,11 +20,11 @@ class AllocatorWrapper : public IAllocator { void Free(void* p) override { return (*impl_)->Free(impl_, p); } - const ONNXRuntimeAllocatorInfo& Info() const override { - return *(ONNXRuntimeAllocatorInfo*)(*impl_)->Info(impl_); + const OrtAllocatorInfo& Info() const override { + return *(OrtAllocatorInfo*)(*impl_)->Info(impl_); } private: - ONNXRuntimeAllocator* impl_; + OrtAllocator* impl_; }; } // namespace onnxruntime diff --git a/onnxruntime/core/session/default_cpu_allocator_c_api.cc b/onnxruntime/core/session/default_cpu_allocator_c_api.cc index 11b4ac58b1e79..4267a7ce83cbe 100644 --- a/onnxruntime/core/session/default_cpu_allocator_c_api.cc +++ b/onnxruntime/core/session/default_cpu_allocator_c_api.cc @@ -5,56 +5,56 @@ #include "core/session/onnxruntime_cxx_api.h" #include -#define ONNXRUNTIME_ALLOCATOR_IMPL_BEGIN(CLASS_NAME) \ - class CLASS_NAME { \ - private: \ - const ONNXRuntimeAllocatorInteface* vtable_ = &table_; \ - std::atomic_int ref_count_; \ - static void* ONNXRUNTIME_API_CALL Alloc_(void* this_ptr, size_t size) { \ - return ((CLASS_NAME*)this_ptr)->Alloc(size); \ - } \ - static void ONNXRUNTIME_API_CALL Free_(void* this_ptr, void* p) { \ - return ((CLASS_NAME*)this_ptr)->Free(p); \ - } \ - static const ONNXRuntimeAllocatorInfo* ONNXRUNTIME_API_CALL Info_(const void* this_ptr) { \ - return ((const 
CLASS_NAME*)this_ptr)->Info(); \ - } \ - static uint32_t ONNXRUNTIME_API_CALL AddRef_(void* this_) { \ - CLASS_NAME* this_ptr = (CLASS_NAME*)this_; \ - return ++this_ptr->ref_count_; \ - } \ - static uint32_t ONNXRUNTIME_API_CALL Release_(void* this_) { \ - CLASS_NAME* this_ptr = (CLASS_NAME*)this_; \ - uint32_t ret = --this_ptr->ref_count_; \ - if (ret == 0) \ - delete this_ptr; \ - return 0; \ - } \ - static ONNXRuntimeAllocatorInteface table_; +#define ORT_ALLOCATOR_IMPL_BEGIN(CLASS_NAME) \ + class CLASS_NAME { \ + private: \ + const OrtAllocatorInterface* vtable_ = &table_; \ + std::atomic_int ref_count_; \ + static void* ORT_API_CALL Alloc_(void* this_ptr, size_t size) { \ + return ((CLASS_NAME*)this_ptr)->Alloc(size); \ + } \ + static void ORT_API_CALL Free_(void* this_ptr, void* p) { \ + return ((CLASS_NAME*)this_ptr)->Free(p); \ + } \ + static const OrtAllocatorInfo* ORT_API_CALL Info_(const void* this_ptr) { \ + return ((const CLASS_NAME*)this_ptr)->Info(); \ + } \ + static uint32_t ORT_API_CALL AddRef_(void* this_) { \ + CLASS_NAME* this_ptr = (CLASS_NAME*)this_; \ + return ++this_ptr->ref_count_; \ + } \ + static uint32_t ORT_API_CALL Release_(void* this_) { \ + CLASS_NAME* this_ptr = (CLASS_NAME*)this_; \ + uint32_t ret = --this_ptr->ref_count_; \ + if (ret == 0) \ + delete this_ptr; \ + return 0; \ + } \ + static OrtAllocatorInterface table_; -#define ONNXRUNTIME_ALLOCATOR_IMPL_END \ - } \ +#define ORT_ALLOCATOR_IMPL_END \ + } \ ; -ONNXRUNTIME_ALLOCATOR_IMPL_BEGIN(ONNXRuntimeDefaultAllocator) +ORT_ALLOCATOR_IMPL_BEGIN(OrtDefaultAllocator) private: -ONNXRuntimeAllocatorInfo* cpuAllocatorInfo; -ONNXRuntimeDefaultAllocator() : ref_count_(1) { - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateAllocatorInfo("Cpu", ONNXRuntimeDeviceAllocator, 0, ONNXRuntimeMemTypeDefault, &cpuAllocatorInfo)); +OrtAllocatorInfo* cpuAllocatorInfo; +OrtDefaultAllocator() : ref_count_(1) { + ORT_THROW_ON_ERROR(OrtCreateAllocatorInfo("Cpu", OrtDeviceAllocator, 0, OrtMemTypeDefault, &cpuAllocatorInfo)); } -~ONNXRuntimeDefaultAllocator() { +~OrtDefaultAllocator() { assert(ref_count_ == 0); - ReleaseONNXRuntimeAllocatorInfo(cpuAllocatorInfo); + ReleaseOrtAllocatorInfo(cpuAllocatorInfo); } public: -ONNXRuntimeDefaultAllocator(const ONNXRuntimeDefaultAllocator&) = delete; -ONNXRuntimeDefaultAllocator& operator=(const ONNXRuntimeDefaultAllocator&) = delete; -ONNXRuntimeAllocatorInteface** Upcast() { - return const_cast(&vtable_); +OrtDefaultAllocator(const OrtDefaultAllocator&) = delete; +OrtDefaultAllocator& operator=(const OrtDefaultAllocator&) = delete; +OrtAllocatorInterface** Upcast() { + return const_cast(&vtable_); } -static ONNXRuntimeAllocatorInteface** Create() { - return (ONNXRuntimeAllocatorInteface**)new ONNXRuntimeDefaultAllocator(); +static OrtAllocatorInterface** Create() { + return (OrtAllocatorInterface**)new OrtDefaultAllocator(); } void* Alloc(size_t size) { return ::malloc(size); @@ -62,24 +62,24 @@ void* Alloc(size_t size) { void Free(void* p) { return ::free(p); } -const ONNXRuntimeAllocatorInfo* Info() const { +const OrtAllocatorInfo* Info() const { return cpuAllocatorInfo; } -ONNXRUNTIME_ALLOCATOR_IMPL_END +ORT_ALLOCATOR_IMPL_END #define API_IMPL_BEGIN try { -#define API_IMPL_END \ - } \ - catch (std::exception & ex) { \ - return CreateONNXStatus(ONNXRUNTIME_RUNTIME_EXCEPTION, ex.what()); \ +#define API_IMPL_END \ + } \ + catch (std::exception & ex) { \ + return CreateONNXStatus(ORT_RUNTIME_EXCEPTION, ex.what()); \ } -ONNXRuntimeAllocatorInteface ONNXRuntimeDefaultAllocator::table_ = { - 
{ONNXRuntimeDefaultAllocator::AddRef_, ONNXRuntimeDefaultAllocator::Release_}, ONNXRuntimeDefaultAllocator::Alloc_, ONNXRuntimeDefaultAllocator::Free_, ONNXRuntimeDefaultAllocator::Info_}; +OrtAllocatorInterface OrtDefaultAllocator::table_ = { + {OrtDefaultAllocator::AddRef_, OrtDefaultAllocator::Release_}, OrtDefaultAllocator::Alloc_, OrtDefaultAllocator::Free_, OrtDefaultAllocator::Info_}; -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateDefaultAllocator, _Out_ ONNXRuntimeAllocator** out) { +ORT_API_STATUS_IMPL(OrtCreateDefaultAllocator, _Out_ OrtAllocator** out) { API_IMPL_BEGIN - *out = ONNXRuntimeDefaultAllocator::Create(); + *out = OrtDefaultAllocator::Create(); return nullptr; API_IMPL_END } diff --git a/onnxruntime/core/session/inference_session.cc b/onnxruntime/core/session/inference_session.cc index 31919bce3fd66..52703a8b2235d 100644 --- a/onnxruntime/core/session/inference_session.cc +++ b/onnxruntime/core/session/inference_session.cc @@ -51,8 +51,8 @@ class InferenceSession::Impl { logging_manager_{logging_manager}, session_state_{execution_providers_}, insert_cast_transformer_{"CastFloat16Transformer"} { - ONNXRUNTIME_ENFORCE(Environment::IsInitialized(), - "Environment must be initialized before creating an InferenceSession."); + ORT_ENFORCE(Environment::IsInitialized(), + "Environment must be initialized before creating an InferenceSession."); InitLogger(logging_manager); @@ -99,11 +99,11 @@ class InferenceSession::Impl { } for (auto& dso_file_path : dso_list) { std::shared_ptr custom_registry; - ONNXRUNTIME_RETURN_IF_ERROR(custom_ops_loader_.LoadCustomOps(dso_file_path, custom_registry)); + ORT_RETURN_IF_ERROR(custom_ops_loader_.LoadCustomOps(dso_file_path, custom_registry)); if (!custom_registry) { return Status(common::ONNXRUNTIME, common::FAIL, "Null custom_registry after loading custom ops."); } - ONNXRUNTIME_RETURN_IF_ERROR(RegisterCustomRegistry(custom_registry)); + ORT_RETURN_IF_ERROR(RegisterCustomRegistry(custom_registry)); } return Status::OK(); } @@ -130,11 +130,11 @@ class InferenceSession::Impl { } std::shared_ptr p_tmp_model; - ONNXRUNTIME_RETURN_IF_ERROR(onnxruntime::Model::Load(model_uri, p_tmp_model, - HasLocalSchema() ? &custom_schema_registries_ : nullptr)); + ORT_RETURN_IF_ERROR(onnxruntime::Model::Load(model_uri, p_tmp_model, + HasLocalSchema() ? &custom_schema_registries_ : nullptr)); model_ = p_tmp_model; - ONNXRUNTIME_RETURN_IF_ERROR(DoPostLoadProcessing(*model_.get())); + ORT_RETURN_IF_ERROR(DoPostLoadProcessing(*model_.get())); // all steps complete, mark the model as loaded. is_model_loaded_ = true; @@ -159,11 +159,11 @@ class InferenceSession::Impl { } std::shared_ptr p_tmp_model; - ONNXRUNTIME_RETURN_IF_ERROR(onnxruntime::Model::Load(model_proto, p_tmp_model, - HasLocalSchema() ? &custom_schema_registries_ : nullptr)); + ORT_RETURN_IF_ERROR(onnxruntime::Model::Load(model_proto, p_tmp_model, + HasLocalSchema() ? &custom_schema_registries_ : nullptr)); model_ = p_tmp_model; - ONNXRUNTIME_RETURN_IF_ERROR(DoPostLoadProcessing(*model_.get())); + ORT_RETURN_IF_ERROR(DoPostLoadProcessing(*model_.get())); // all steps complete, mark the model as loaded. is_model_loaded_ = true; @@ -190,11 +190,11 @@ class InferenceSession::Impl { } std::shared_ptr p_tmp_model; - ONNXRUNTIME_RETURN_IF_ERROR(onnxruntime::Model::Load(std::move(p_model_proto), p_tmp_model, - HasLocalSchema() ? &custom_schema_registries_ : nullptr)); + ORT_RETURN_IF_ERROR(onnxruntime::Model::Load(std::move(p_model_proto), p_tmp_model, + HasLocalSchema() ? 
&custom_schema_registries_ : nullptr)); model_ = p_tmp_model; - ONNXRUNTIME_RETURN_IF_ERROR(DoPostLoadProcessing(*model_.get())); + ORT_RETURN_IF_ERROR(DoPostLoadProcessing(*model_.get())); // all steps complete, mark the model as loaded. is_model_loaded_ = true; @@ -227,11 +227,11 @@ class InferenceSession::Impl { } std::shared_ptr p_tmp_model; - ONNXRUNTIME_RETURN_IF_ERROR(onnxruntime::Model::Load(model_proto, p_tmp_model, - HasLocalSchema() ? &custom_schema_registries_ : nullptr)); + ORT_RETURN_IF_ERROR(onnxruntime::Model::Load(model_proto, p_tmp_model, + HasLocalSchema() ? &custom_schema_registries_ : nullptr)); model_ = p_tmp_model; - ONNXRUNTIME_RETURN_IF_ERROR(DoPostLoadProcessing(*model_.get())); + ORT_RETURN_IF_ERROR(DoPostLoadProcessing(*model_.get())); // all steps complete, mark the model as loaded. is_model_loaded_ = true; @@ -250,7 +250,7 @@ class InferenceSession::Impl { // memory allocations for a subgraph that are owned by InferenceSession struct SubgraphMemory { std::unique_ptr session_state; - std::map weights_buffers; + std::map weights_buffers; }; /// iterate nodes in graph looking for ones with graph attribute/s @@ -266,7 +266,7 @@ class InferenceSession::Impl { // check if it has a subgraph if (proto.has_g()) { Graph* subgraph = node.GetMutableGraphAttribute(name); - ONNXRUNTIME_ENFORCE(subgraph, "Main Graph instance should have populated all subgraphs when being resolved."); + ORT_ENFORCE(subgraph, "Main Graph instance should have populated all subgraphs when being resolved."); SubgraphMemory subgraph_info; // create SessionState for executing subgraph @@ -277,13 +277,13 @@ class InferenceSession::Impl { SessionStateInitializer initializer{*subgraph, *subgraph_info.session_state, execution_providers_, kernel_registry_manager_, *session_logger_}; - ONNXRUNTIME_RETURN_IF_ERROR( + ORT_RETURN_IF_ERROR( initializer.CreatePlan(graph_transformation_mgr_, insert_cast_transformer_, node.ImplicitInputDefs(), session_options_.enable_sequential_execution)); - ONNXRUNTIME_RETURN_IF_ERROR(initializer.InitializeAndSave(session_state_.GetEnableMemoryPattern(), - subgraph_info.weights_buffers)); + ORT_RETURN_IF_ERROR(initializer.InitializeAndSave(session_state_.GetEnableMemoryPattern(), + subgraph_info.weights_buffers)); // add the subgraph SessionState instance to the parent graph SessionState so it can be retrieved // by Compute() via OpKernelContextInternal. @@ -293,7 +293,7 @@ class InferenceSession::Impl { // &*subgraph_info.session_state); // recurse - ONNXRUNTIME_RETURN_IF_ERROR(InitializeSubgraphSessions(*subgraph, *subgraph_info.session_state)); + ORT_RETURN_IF_ERROR(InitializeSubgraphSessions(*subgraph, *subgraph_info.session_state)); // save subgraph_info as InferenceSession owns these so they remain valid // for the entire InferenceSession. 
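Aside on the status macros this file keeps renaming: ORT_RETURN_IF_ERROR, ORT_MAKE_STATUS and ORT_ENFORCE are the new spellings of the old ONNXRUNTIME_* error-handling macros used throughout inference_session.cc. The sketch below only illustrates how they compose; it assumes onnxruntime's common headers are on the include path, and CheckPositive/AddChecked are hypothetical helpers written for illustration, not functions from this diff.

```cpp
// Illustrative sketch only: the renamed ORT_* status macros in use.
// Assumes onnxruntime's common headers; CheckPositive/AddChecked are hypothetical.
#include "core/common/common.h"
#include "core/common/status.h"

namespace {

onnxruntime::common::Status CheckPositive(int v) {
  if (v <= 0) {
    // ORT_MAKE_STATUS builds a Status from a category, a code, and a streamed message.
    return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "expected a positive value, got ", v);
  }
  return onnxruntime::common::Status::OK();
}

onnxruntime::common::Status AddChecked(int a, int b, int& out) {
  // ORT_RETURN_IF_ERROR evaluates its argument and returns early on failure,
  // so multi-step initialization reads as a straight line instead of nested ifs.
  ORT_RETURN_IF_ERROR(CheckPositive(a));
  ORT_RETURN_IF_ERROR(CheckPositive(b));
  out = a + b;
  // ORT_ENFORCE throws instead of returning a Status when its condition is false.
  ORT_ENFORCE(out >= a, "unexpected overflow");
  return onnxruntime::common::Status::OK();
}

}  // namespace
```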
@@ -347,26 +347,26 @@ class InferenceSession::Impl { SessionStateInitializer session_initializer{graph, session_state_, execution_providers_, kernel_registry_manager_, *session_logger_}; - ONNXRUNTIME_RETURN_IF_ERROR(session_initializer.CreatePlan(graph_transformation_mgr_, insert_cast_transformer_, - {}, session_options_.enable_sequential_execution)); + ORT_RETURN_IF_ERROR(session_initializer.CreatePlan(graph_transformation_mgr_, insert_cast_transformer_, + {}, session_options_.enable_sequential_execution)); - ONNXRUNTIME_RETURN_IF_ERROR(session_initializer.InitializeAndSave(session_state_.GetEnableMemoryPattern(), - weights_buffers_)); + ORT_RETURN_IF_ERROR(session_initializer.InitializeAndSave(session_state_.GetEnableMemoryPattern(), + weights_buffers_)); // handle any subgraphs - ONNXRUNTIME_RETURN_IF_ERROR(InitializeSubgraphSessions(graph, session_state_)); + ORT_RETURN_IF_ERROR(InitializeSubgraphSessions(graph, session_state_)); is_inited_ = true; LOGS(*session_logger_, INFO) << "Session successfully initialized."; } catch (const NotImplementedException& ex) { - status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Exception during initialization: ", ex.what()); + status = ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Exception during initialization: ", ex.what()); LOGS(*session_logger_, ERROR) << status.ErrorMessage(); } catch (const std::exception& ex) { - status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Exception during initialization: ", ex.what()); + status = ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Exception during initialization: ", ex.what()); LOGS(*session_logger_, ERROR) << status.ErrorMessage(); } catch (...) { - status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, RUNTIME_EXCEPTION, "Encountered unknown exception in Initialize()"); + status = ORT_MAKE_STATUS(ONNXRUNTIME, RUNTIME_EXCEPTION, "Encountered unknown exception in Initialize()"); LOGS(*session_logger_, ERROR) << status.ErrorMessage(); } @@ -438,8 +438,8 @@ class InferenceSession::Impl { }); if (!missing_required_inputs.empty()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, - "Missing required inputs: ", missing_required_inputs); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "Missing required inputs: ", missing_required_inputs); } bool valid = true; @@ -458,18 +458,18 @@ class InferenceSession::Impl { [&ostr](const std::string& elem) { ostr << elem << " "; }); - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, - "Invalid Feed Input Names:", invalid_names.str(), - ". Valid input names are: ", ostr.str()); + return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, + "Invalid Feed Input Names:", invalid_names.str(), + ". Valid input names are: ", ostr.str()); } return Status::OK(); } common::Status ValidateInputs(const NameMLValMap& feeds) { - ONNXRUNTIME_RETURN_IF_ERROR(ValidateInputNames(feeds)); + ORT_RETURN_IF_ERROR(ValidateInputNames(feeds)); //TODO: It should also validate the input shapes? 
- ONNXRUNTIME_RETURN_IF_ERROR(ValidateInputTypes(feeds)); + ORT_RETURN_IF_ERROR(ValidateInputTypes(feeds)); return Status::OK(); } @@ -527,10 +527,10 @@ class InferenceSession::Impl { MLValue new_mlvalue; auto& input_name = pair.first; auto& orig_mlvalue = pair.second; - ONNXRUNTIME_RETURN_IF_ERROR(IOBinding::CopyOneInputAcrossDevices(session_state, - input_name, - orig_mlvalue, - new_mlvalue)); + ORT_RETURN_IF_ERROR(IOBinding::CopyOneInputAcrossDevices(session_state, + input_name, + orig_mlvalue, + new_mlvalue)); new_feeds[input_name] = new_mlvalue; } return Status::OK(); @@ -547,7 +547,7 @@ class InferenceSession::Impl { std::set seen_outputs; auto p_graph = session_state_.GetGraphViewer(); - ONNXRUNTIME_ENFORCE(p_graph); + ORT_ENFORCE(p_graph); std::pair found; for (auto& node : p_graph->Nodes()) { // TODO optimize this @@ -617,7 +617,7 @@ class InferenceSession::Impl { auto& def_name = one_def->Name(); size_t idx = found.second; int mlvalue_idx; - ONNXRUNTIME_RETURN_IF_ERROR(mlvalue_name_idx_map.GetIdx(def_name, mlvalue_idx)); + ORT_RETURN_IF_ERROR(mlvalue_name_idx_map.GetIdx(def_name, mlvalue_idx)); if (!weights.count(mlvalue_idx)) { LOGS(*session_logger_, INFO) << "Output with name " << def_name << " is not a weight."; continue; @@ -628,8 +628,8 @@ class InferenceSession::Impl { } if (seen_outputs.size() != output_names.size()) // make sure we've seen all outputs - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "output size mismatch, expected ", output_names.size(), - " got ", seen_outputs.size()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "output size mismatch, expected ", output_names.size(), + " got ", seen_outputs.size()); return Status::OK(); } @@ -642,7 +642,7 @@ class InferenceSession::Impl { if (!p_provider) return Status(common::ONNXRUNTIME, common::INVALID_ARGUMENT, "invalid provider_type"); - auto allocator = p_provider->GetAllocator(device_id, ONNXRuntimeMemTypeDefault); + auto allocator = p_provider->GetAllocator(device_id, OrtMemTypeDefault); if (!allocator) return Status(common::ONNXRUNTIME, common::FAIL, "invalid allocator"); @@ -680,7 +680,7 @@ class InferenceSession::Impl { auto* p_fetched_provider = execution_providers_.Get(fetched_tensor_location); if (!p_fetched_provider) { p_fetched_provider = execution_providers_.Get(onnxruntime::kCpuExecutionProvider); - ONNXRUNTIME_ENFORCE(p_fetched_provider); + ORT_ENFORCE(p_fetched_provider); } auto fetched_provider_type = p_fetched_provider->Type(); @@ -688,9 +688,9 @@ class InferenceSession::Impl { auto& output_mlvalue = user_fetches[idx]; if (!output_mlvalue.IsAllocated()) { if (fetched_provider_type != onnxruntime::kCpuExecutionProvider) { - ONNXRUNTIME_RETURN_IF_ERROR(AllocateHelper(onnxruntime::kCpuExecutionProvider, 0, - fetched_tensor, - output_mlvalue)); + ORT_RETURN_IF_ERROR(AllocateHelper(onnxruntime::kCpuExecutionProvider, 0, + fetched_tensor, + output_mlvalue)); } else { user_fetches[idx] = fetched_mlvalue; continue; @@ -702,21 +702,21 @@ class InferenceSession::Impl { auto* p_output_provider = execution_providers_.Get(output_tensor_loc); if (!p_output_provider) { p_output_provider = execution_providers_.Get(onnxruntime::kCpuExecutionProvider); - ONNXRUNTIME_ENFORCE(p_output_provider); + ORT_ENFORCE(p_output_provider); } auto output_provider_type = p_output_provider->Type(); - if (output_provider_type == fetched_provider_type || fetched_tensor_location.mem_type == ONNXRuntimeMemTypeCPUOutput) { + if (output_provider_type == fetched_provider_type || fetched_tensor_location.mem_type == OrtMemTypeCPUOutput) { 
user_fetches[idx] = fetched_mlvalue; continue; } // our CPU exec provider doesn't support copy from GPU->CPU if (fetched_provider_type != onnxruntime::kCpuExecutionProvider) { - ONNXRUNTIME_RETURN_IF_ERROR(p_fetched_provider->CopyTensor(fetched_tensor, *p_output_tensor)); + ORT_RETURN_IF_ERROR(p_fetched_provider->CopyTensor(fetched_tensor, *p_output_tensor)); } else { - ONNXRUNTIME_RETURN_IF_ERROR(p_output_provider->CopyTensor(fetched_tensor, *p_output_tensor)); + ORT_RETURN_IF_ERROR(p_output_provider->CopyTensor(fetched_tensor, *p_output_tensor)); } } @@ -739,10 +739,10 @@ class InferenceSession::Impl { } } - ONNXRUNTIME_CHECK_AND_SET_RETVAL(ValidateInputs(feeds)); + ORT_CHECK_AND_SET_RETVAL(ValidateInputs(feeds)); // if the output vector is non-empty, ensure that its the same size as the output_names - ONNXRUNTIME_CHECK_AND_SET_RETVAL(ValidateOutputs(output_names, p_fetches)); + ORT_CHECK_AND_SET_RETVAL(ValidateOutputs(output_names, p_fetches)); if (!run_options.run_tag.empty()) { LOGS(*session_logger_, INFO) << "Running with tag: " << run_options.run_tag; @@ -760,13 +760,13 @@ class InferenceSession::Impl { // info all execution providers InferenceSession:Run started // TODO: only call OnRunStart for all providers in-use for (auto& xp : execution_providers_) - ONNXRUNTIME_CHECK_AND_SET_RETVAL(xp->OnRunStart()); + ORT_CHECK_AND_SET_RETVAL(xp->OnRunStart()); NameMLValMap copied_feeds; - ONNXRUNTIME_CHECK_AND_SET_RETVAL(CopyInputsAcrossDevices(session_state_, feeds, copied_feeds)); + ORT_CHECK_AND_SET_RETVAL(CopyInputsAcrossDevices(session_state_, feeds, copied_feeds)); std::vector new_fetches; - ONNXRUNTIME_CHECK_AND_SET_RETVAL(MatchOutputsWithProviders(output_names, *p_fetches, new_fetches)); + ORT_CHECK_AND_SET_RETVAL(MatchOutputsWithProviders(output_names, *p_fetches, new_fetches)); std::unique_ptr p_exec; @@ -778,8 +778,8 @@ class InferenceSession::Impl { } } - ONNXRUNTIME_CHECK_AND_SET_RETVAL(p_exec->Execute(session_state_, copied_feeds, output_names, new_fetches, run_logger)); - ONNXRUNTIME_CHECK_AND_SET_RETVAL(CopyOutputsAcrossDevices(new_fetches, *p_fetches)); + ORT_CHECK_AND_SET_RETVAL(p_exec->Execute(session_state_, copied_feeds, output_names, new_fetches, run_logger)); + ORT_CHECK_AND_SET_RETVAL(CopyOutputsAcrossDevices(new_fetches, *p_fetches)); } catch (const std::exception& e) { retval = Status(common::ONNXRUNTIME, common::FAIL, e.what()); @@ -789,7 +789,7 @@ class InferenceSession::Impl { // info all execution providers InferenceSession:Run ended for (auto& xp : execution_providers_) - ONNXRUNTIME_CHECK_AND_SET_RETVAL(xp->OnRunEnd()); + ORT_CHECK_AND_SET_RETVAL(xp->OnRunEnd()); --current_num_runs_; session_profiler_.EndTimeAndRecordEvent(profiling::SESSION_EVENT, "model_run", tp); @@ -1006,7 +1006,7 @@ class InferenceSession::Impl { common::Status WaitForNotification(Notification* p_executor_done, int64_t timeout_in_ms) { if (timeout_in_ms > 0) { - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, "timeout_in_ms >0 is not supported"); // TODO + ORT_NOT_IMPLEMENTED(__FUNCTION__, "timeout_in_ms >0 is not supported"); // TODO } p_executor_done->WaitForNotification(); @@ -1075,7 +1075,7 @@ class InferenceSession::Impl { bool is_model_loaded_ = false; // GUARDED_BY(session_mutex_) bool is_inited_ = false; // GUARDED_BY(session_mutex_) - std::map weights_buffers_; + std::map weights_buffers_; InsertCastTransformer insert_cast_transformer_; // memory allocations for any subgraphs diff --git a/onnxruntime/core/session/inference_session.h b/onnxruntime/core/session/inference_session.h index 
65a7ef991063f..6eac4581ffa5b 100644 --- a/onnxruntime/core/session/inference_session.h +++ b/onnxruntime/core/session/inference_session.h @@ -268,7 +268,7 @@ class InferenceSession { common::Status Load(std::unique_ptr p_model_proto); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(InferenceSession); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(InferenceSession); class Impl; std::unique_ptr impl_; diff --git a/onnxruntime/core/session/onnxruntime_c_api.cc b/onnxruntime/core/session/onnxruntime_c_api.cc index 6cd0ba637d31c..fe3daab56735a 100644 --- a/onnxruntime/core/session/onnxruntime_c_api.cc +++ b/onnxruntime/core/session/onnxruntime_c_api.cc @@ -38,37 +38,37 @@ using onnxruntime::Tensor; using onnxruntime::ToONNXStatus; using onnxruntime::common::Status; -#define ONNXRUNTIME_API_RETURN_IF_ERROR(expr) \ - do { \ - auto _status = (expr); \ - if (_status) return _status; \ +#define ORT_API_RETURN_IF_ERROR(expr) \ + do { \ + auto _status = (expr); \ + if (_status) return _status; \ } while (0) -struct ONNXRuntimeEnv : public onnxruntime::ObjectBase { +struct OrtEnv : public onnxruntime::ObjectBase { public: Environment* value; LoggingManager* loggingManager; - friend class onnxruntime::ObjectBase; + friend class onnxruntime::ObjectBase; - ONNXRuntimeEnv(Environment* value1, LoggingManager* loggingManager1) : value(value1), loggingManager(loggingManager1) { - ONNXRUNTIME_CHECK_C_OBJECT_LAYOUT; + OrtEnv(Environment* value1, LoggingManager* loggingManager1) : value(value1), loggingManager(loggingManager1) { + ORT_CHECK_C_OBJECT_LAYOUT; } /** * This function will call ::google::protobuf::ShutdownProtobufLibrary */ - ~ONNXRuntimeEnv() { + ~OrtEnv() { assert(ref_count == 0); delete loggingManager; delete value; } - ONNXRUNTIME_DISALLOW_COPY_AND_ASSIGNMENT(ONNXRuntimeEnv); + ORT_DISALLOW_COPY_AND_ASSIGNMENT(OrtEnv); }; #define API_IMPL_BEGIN try { -#define API_IMPL_END \ - } \ - catch (std::exception & ex) { \ - return CreateONNXStatus(ONNXRUNTIME_RUNTIME_EXCEPTION, ex.what()); \ +#define API_IMPL_END \ + } \ + catch (std::exception & ex) { \ + return CreateONNXStatus(ORT_RUNTIME_EXCEPTION, ex.what()); \ } #define TENSOR_READ_API_BEGIN \ @@ -83,25 +83,25 @@ struct ONNXRuntimeEnv : public onnxruntime::ObjectBase { class LoggingWrapper : public ISink { public: - LoggingWrapper(ONNXRuntimeLoggingFunction logging_function, void* logger_param) + LoggingWrapper(OrtLoggingFunction logging_function, void* logger_param) : logging_function_{logging_function}, logger_param_{logger_param} { } void SendImpl(const Timestamp& /*timestamp*/ /*timestamp*/, const std::string& logger_id, const Capture& message) override { std::string s = message.Location().ToString(); - logging_function_(logger_param_, static_cast(message.Severity()), message.Category(), + logging_function_(logger_param_, static_cast(message.Severity()), message.Category(), logger_id.c_str(), s.c_str(), message.Message().c_str()); } private: - ONNXRuntimeLoggingFunction logging_function_; + OrtLoggingFunction logging_function_; void* logger_param_; }; -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInitializeWithCustomLogger, ONNXRuntimeLoggingFunction logging_function, - _In_opt_ void* logger_param, ONNXRuntimeLoggingLevel default_warning_level, _In_ const char* logid, - _Out_ ONNXRuntimeEnv** out) { +ORT_API_STATUS_IMPL(OrtInitializeWithCustomLogger, OrtLoggingFunction logging_function, + _In_opt_ void* logger_param, OrtLoggingLevel default_warning_level, _In_ const char* logid, + _Out_ OrtEnv** out) { API_IMPL_BEGIN std::string name = logid; 
std::unique_ptr logger = std::make_unique(logging_function, logger_param); @@ -112,13 +112,13 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInitializeWithCustomLogger, ONNXRuntimeLo std::unique_ptr env; Status status = Environment::Create(env); if (status.IsOK()) - *out = new ONNXRuntimeEnv(env.release(), default_logging_manager.release()); + *out = new OrtEnv(env.release(), default_logging_manager.release()); return ToONNXStatus(status); API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInitialize, ONNXRuntimeLoggingLevel default_warning_level, - _In_ const char* logid, _Out_ ONNXRuntimeEnv** out) { +ORT_API_STATUS_IMPL(OrtInitialize, OrtLoggingLevel default_warning_level, + _In_ const char* logid, _Out_ OrtEnv** out) { API_IMPL_BEGIN std::string name = logid; auto default_logging_manager = std::make_unique(std::unique_ptr{new CLogSink{}}, @@ -128,12 +128,12 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInitialize, ONNXRuntimeLoggingLevel defau std::unique_ptr env; Status status = Environment::Create(env); if (status.IsOK()) - *out = new ONNXRuntimeEnv(env.release(), default_logging_manager.release()); + *out = new OrtEnv(env.release(), default_logging_manager.release()); return ToONNXStatus(status); API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetStringTensorDataLength, _In_ const ONNXValue* value, _Out_ size_t* out) { +ORT_API_STATUS_IMPL(OrtGetStringTensorDataLength, _In_ const ONNXValue* value, _Out_ size_t* out) { TENSOR_READ_API_BEGIN const auto* src = tensor.Data(); int64_t len = tensor.Shape().Size(); @@ -144,17 +144,17 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetStringTensorDataLength, _In_ const ONN } *out = ret; } else - return CreateONNXStatus(ONNXRUNTIME_INVALID_ARGUMENT, "shape is invalid"); + return CreateONNXStatus(ORT_INVALID_ARGUMENT, "shape is invalid"); return nullptr; API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeFillStringTensor, _In_ ONNXValue* value, _In_ const char* const* s, size_t s_len) { +ORT_API_STATUS_IMPL(OrtFillStringTensor, _In_ ONNXValue* value, _In_ const char* const* s, size_t s_len) { TENSOR_READWRITE_API_BEGIN auto* dst = tensor->MutableData(); auto len = static_cast(tensor->Shape().Size()); if (s_len < len) { - return CreateONNXStatus(ONNXRUNTIME_INVALID_ARGUMENT, "input array is too short"); + return CreateONNXStatus(ORT_INVALID_ARGUMENT, "input array is too short"); } for (size_t i = 0; i != len; ++i) { //allocate and copy @@ -165,7 +165,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeFillStringTensor, _In_ ONNXValue* value, } template -ONNXStatus* CreateTensorImpl(const size_t* shape, size_t shape_len, ONNXRuntimeAllocatorInteface** allocator, +ONNXStatus* CreateTensorImpl(const size_t* shape, size_t shape_len, OrtAllocatorInterface** allocator, std::unique_ptr* out) { size_t elem_count = 1; std::vector shapes(shape_len); @@ -176,11 +176,11 @@ ONNXStatus* CreateTensorImpl(const size_t* shape, size_t shape_len, ONNXRuntimeA size_t size_to_allocate; if (!IAllocator::CalcMemSizeForArray(sizeof(T), elem_count, &size_to_allocate)) { - return CreateONNXStatus(ONNXRUNTIME_FAIL, "not enough memory"); + return CreateONNXStatus(ORT_FAIL, "not enough memory"); } void* p_data = (*allocator)->Alloc(allocator, size_to_allocate); if (p_data == nullptr) - return CreateONNXStatus(ONNXRUNTIME_FAIL, "size overflow"); + return CreateONNXStatus(ORT_FAIL, "size overflow"); *out = std::make_unique(DataTypeImpl::GetType(), onnxruntime::TensorShape(shapes), static_cast(p_data), @@ -194,7 +194,7 @@ ONNXStatus* CreateTensorImpl(const size_t* shape, size_t 
shape_len, ONNXRuntimeA * this function will create a copy of the allocator info */ template -ONNXStatus* CreateTensorImpl(const size_t* shape, size_t shape_len, const ONNXRuntimeAllocatorInfo* info, +ONNXStatus* CreateTensorImpl(const size_t* shape, size_t shape_len, const OrtAllocatorInfo* info, void* p_data, size_t p_data_len, std::unique_ptr* out) { size_t elem_count = 1; std::vector shapes(shape_len); @@ -205,12 +205,12 @@ ONNXStatus* CreateTensorImpl(const size_t* shape, size_t shape_len, const ONNXRu size_t size_to_allocate; if (!IAllocator::CalcMemSizeForArray(sizeof(T), elem_count, &size_to_allocate)) { - return CreateONNXStatus(ONNXRUNTIME_INVALID_ARGUMENT, "size overflow"); + return CreateONNXStatus(ORT_INVALID_ARGUMENT, "size overflow"); } if (size_to_allocate > p_data_len) { std::ostringstream oss; oss << "not enough space: expected " << size_to_allocate << ", got " << p_data_len; - return CreateONNXStatus(ONNXRUNTIME_INVALID_ARGUMENT, oss.str().c_str()); + return CreateONNXStatus(ORT_INVALID_ARGUMENT, oss.str().c_str()); } *out = std::make_unique(DataTypeImpl::GetType(), onnxruntime::TensorShape(shapes), @@ -223,50 +223,50 @@ ONNXStatus* CreateTensorImpl(const size_t* shape, size_t shape_len, const ONNXRu /** * this function will create a copy of the allocator info */ -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateTensorWithDataAsONNXValue, _In_ const ONNXRuntimeAllocatorInfo* info, - _In_ void* p_data, size_t p_data_len, _In_ const size_t* shape, size_t shape_len, - OnnxRuntimeTensorElementDataType type, _Out_ ONNXValue** out) { +ORT_API_STATUS_IMPL(OrtCreateTensorWithDataAsONNXValue, _In_ const OrtAllocatorInfo* info, + _In_ void* p_data, size_t p_data_len, _In_ const size_t* shape, size_t shape_len, + OrtTensorElementDataType type, _Out_ ONNXValue** out) { API_IMPL_BEGIN std::unique_ptr tensor; switch (type) { case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + 
ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, info, p_data, p_data_len, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64: case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128: @@ -274,7 +274,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateTensorWithDataAsONNXValue, _In_ con std::ostringstream oss; oss << "type " << type << " is not supported in this function"; std::string errmsg = oss.str(); - return CreateONNXStatus(ONNXRUNTIME_NOT_IMPLEMENTED, errmsg.c_str()); + return CreateONNXStatus(ORT_NOT_IMPLEMENTED, errmsg.c_str()); } } std::unique_ptr value = std::make_unique(); @@ -286,50 +286,50 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateTensorWithDataAsONNXValue, _In_ con API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateTensorAsONNXValue, _Inout_ ONNXRuntimeAllocator* allocator, - _In_ const size_t* shape, size_t shape_len, OnnxRuntimeTensorElementDataType type, - _Out_ ONNXValue** out) { +ORT_API_STATUS_IMPL(OrtCreateTensorAsONNXValue, _Inout_ OrtAllocator* allocator, + _In_ const size_t* shape, size_t shape_len, OrtTensorElementDataType type, + _Out_ ONNXValue** out) { API_IMPL_BEGIN std::unique_ptr tensor; switch (type) { case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, 
shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64: - ONNXRUNTIME_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); + ORT_API_RETURN_IF_ERROR(CreateTensorImpl(shape, shape_len, allocator, &tensor)); break; case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64: case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128: @@ -338,7 +338,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateTensorAsONNXValue, _Inout_ ONNXRunt std::ostringstream oss; oss << "type " << type << " is not supported in this function"; std::string errmsg = oss.str(); - return CreateONNXStatus(ONNXRUNTIME_NOT_IMPLEMENTED, errmsg.c_str()); + return CreateONNXStatus(ORT_NOT_IMPLEMENTED, errmsg.c_str()); } } std::unique_ptr value = std::make_unique(); @@ -351,8 +351,8 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateTensorAsONNXValue, _Inout_ ONNXRunt } template -static ONNXStatus* CreateInferenceSessionImpl(_In_ ONNXRuntimeEnv* env, _In_ T model_path, - _In_ const ONNXRuntimeSessionOptions* options, +static ONNXStatus* CreateInferenceSessionImpl(_In_ OrtEnv* env, _In_ T model_path, + _In_ const OrtSessionOptions* options, _Out_ ONNXSession** out) { API_IMPL_BEGIN auto sess = std::make_unique<::onnxruntime::InferenceSession>(options == nullptr ? 
onnxruntime::SessionOptions() : options->value, env->loggingManager); @@ -363,8 +363,8 @@ static ONNXStatus* CreateInferenceSessionImpl(_In_ ONNXRuntimeEnv* env, _In_ T m return ToONNXStatus(status); } if (options != nullptr) - for (ONNXRuntimeProviderFactoryInterface** p : options->provider_factories) { - ONNXRuntimeProvider* provider; + for (OrtProviderFactoryInterface** p : options->provider_factories) { + OrtProvider* provider; ONNXStatus* error_code = (*p)->CreateProvider(p, &provider); if (error_code) return error_code; @@ -383,25 +383,25 @@ static ONNXStatus* CreateInferenceSessionImpl(_In_ ONNXRuntimeEnv* env, _In_ T m } #ifdef _WIN32 -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateInferenceSession, _In_ ONNXRuntimeEnv* env, _In_ const wchar_t* model_path, - _In_ const ONNXRuntimeSessionOptions* options, _Out_ ONNXSession** out) { +ORT_API_STATUS_IMPL(OrtCreateInferenceSession, _In_ OrtEnv* env, _In_ const wchar_t* model_path, + _In_ const OrtSessionOptions* options, _Out_ ONNXSession** out) { API_IMPL_BEGIN return CreateInferenceSessionImpl(env, model_path, options, out); API_IMPL_END } #else -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeCreateInferenceSession, _In_ ONNXRuntimeEnv* env, _In_ const char* model_path, - _In_ const ONNXRuntimeSessionOptions* options, _Out_ ONNXSession** out) { +ORT_API_STATUS_IMPL(OrtCreateInferenceSession, _In_ OrtEnv* env, _In_ const char* model_path, + _In_ const OrtSessionOptions* options, _Out_ ONNXSession** out) { API_IMPL_BEGIN return CreateInferenceSessionImpl(env, model_path, options, out); API_IMPL_END } #endif -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeRunInference, _In_ ONNXSession* sess, - _In_ ONNXRuntimeRunOptions* run_options, - _In_ const char* const* input_names, _In_ const ONNXValue* const* input, size_t input_len, - _In_ const char* const* output_names1, size_t output_names_len, _Out_ ONNXValue** output) { +ORT_API_STATUS_IMPL(OrtRunInference, _In_ ONNXSession* sess, + _In_ OrtRunOptions* run_options, + _In_ const char* const* input_names, _In_ const ONNXValue* const* input, size_t input_len, + _In_ const char* const* output_names1, size_t output_names_len, _Out_ ONNXValue** output) { API_IMPL_BEGIN auto session = reinterpret_cast<::onnxruntime::InferenceSession*>(sess); ::onnxruntime::NameMLValMap in; @@ -410,7 +410,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeRunInference, _In_ ONNXSession* sess, auto kvp = in.insert(std::make_pair(std::string(input_names[i]), *reinterpret_cast(input[i]))); if (!kvp.second) { - return CreateONNXStatus(ONNXRUNTIME_INVALID_ARGUMENT, "duplicated input name"); + return CreateONNXStatus(ORT_INVALID_ARGUMENT, "duplicated input name"); } ::onnxruntime::MLValue& value = kvp.first->second; if (value.Fence()) @@ -420,7 +420,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeRunInference, _In_ ONNXSession* sess, std::vector output_names(output_names_len); for (size_t i = 0; i != output_names_len; ++i) { if (output_names1[i] == nullptr || output_names1[i][0] == '\0') { - return CreateONNXStatus(ONNXRUNTIME_INVALID_ARGUMENT, "output name cannot be empty"); + return CreateONNXStatus(ORT_INVALID_ARGUMENT, "output name cannot be empty"); } output_names[i] = output_names1[i]; } @@ -436,7 +436,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeRunInference, _In_ ONNXSession* sess, } Status status; if (run_options == nullptr) { - ONNXRuntimeRunOptions op; + OrtRunOptions op; status = session->Run(op, in, output_names, &fetches); } else { status = session->Run(*run_options, in, output_names, &fetches); @@ -456,7 +456,7 @@ 
ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeRunInference, _In_ ONNXSession* sess, API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetTensorMutableData, _In_ ONNXValue* value, _Out_ void** output) { +ORT_API_STATUS_IMPL(OrtGetTensorMutableData, _In_ ONNXValue* value, _Out_ void** output) { TENSOR_READWRITE_API_BEGIN //TODO: test if it's a string tensor *output = tensor->MutableDataRaw(); @@ -464,13 +464,13 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetTensorMutableData, _In_ ONNXValue* val API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetStringTensorContent, _In_ const ONNXValue* value, - _Out_ void* s, size_t s_len, _Out_ size_t* offsets, size_t offsets_len) { +ORT_API_STATUS_IMPL(OrtGetStringTensorContent, _In_ const ONNXValue* value, + _Out_ void* s, size_t s_len, _Out_ size_t* offsets, size_t offsets_len) { TENSOR_READ_API_BEGIN const auto* input = tensor.Data(); auto len = static_cast(tensor.Shape().Size()); if (offsets_len < len) { - return CreateONNXStatus(ONNXRUNTIME_FAIL, "space is not enough"); + return CreateONNXStatus(ORT_FAIL, "space is not enough"); } { size_t ret = 0; @@ -478,7 +478,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetStringTensorContent, _In_ const ONNXVa ret += input[i].size(); } if (s_len < ret) { - return CreateONNXStatus(ONNXRUNTIME_FAIL, "space is not enough"); + return CreateONNXStatus(ORT_FAIL, "space is not enough"); } } size_t f = 0; @@ -493,13 +493,13 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeGetStringTensorContent, _In_ const ONNXVa API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeTensorProtoToONNXValue, _Inout_ ONNXRuntimeAllocator* allocator, - const void* input, int input_len, _Out_ ONNXValue** out) { +ORT_API_STATUS_IMPL(OrtTensorProtoToONNXValue, _Inout_ OrtAllocator* allocator, + const void* input, int input_len, _Out_ ONNXValue** out) { API_IMPL_BEGIN std::shared_ptr allocator_ = std::make_shared(allocator); ::ONNX_NAMESPACE::TensorProto proto; if (!proto.ParseFromArray(input, input_len)) { - return CreateONNXStatus(ONNXRUNTIME_FAIL, "parse input tensor proto failed"); + return CreateONNXStatus(ORT_FAIL, "parse input tensor proto failed"); } std::unique_ptr value = std::make_unique(); Status st = onnxruntime::utils::TensorProtoToMLValue(proto, allocator_, nullptr, 0, *value); @@ -511,16 +511,16 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeTensorProtoToONNXValue, _Inout_ ONNXRunti } #define DEFINE_RELEASE_ONNX_RUNTIME_OBJECT_FUNCTION(INPUT_TYPE, REAL_TYPE) \ - ONNXRUNTIME_API(void, Release##INPUT_TYPE, INPUT_TYPE* value) { \ + ORT_API(void, Release##INPUT_TYPE, INPUT_TYPE* value) { \ delete reinterpret_cast(value); \ } #define DEFINE_RELEASE_ONNX_RUNTIME_OBJECT_FUNCTION_FOR_ARRAY(INPUT_TYPE, REAL_TYPE) \ - ONNXRUNTIME_API(void, Release##INPUT_TYPE, INPUT_TYPE* value) { \ + ORT_API(void, Release##INPUT_TYPE, INPUT_TYPE* value) { \ delete[] reinterpret_cast(value); \ } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInferenceSessionGetInputCount, _In_ const ONNXSession* sess, _Out_ size_t* out) { +ORT_API_STATUS_IMPL(OrtInferenceSessionGetInputCount, _In_ const ONNXSession* sess, _Out_ size_t* out) { API_IMPL_BEGIN auto session = reinterpret_cast(sess); std::pair p = session->GetModelInputs(); @@ -531,7 +531,7 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInferenceSessionGetInputCount, _In_ const API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInferenceSessionGetOutputCount, _In_ const ONNXSession* sess, _Out_ size_t* out) { +ORT_API_STATUS_IMPL(OrtInferenceSessionGetOutputCount, _In_ const ONNXSession* sess, _Out_ size_t* out) { 
API_IMPL_BEGIN auto session = reinterpret_cast(sess); std::pair p = session->GetModelOutputs(); @@ -542,32 +542,32 @@ ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInferenceSessionGetOutputCount, _In_ cons API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInferenceSessionGetInputTypeInfo, _In_ const ONNXSession* sess, size_t index, _Out_ struct ONNXRuntimeTypeInfo** out) { +ORT_API_STATUS_IMPL(OrtInferenceSessionGetInputTypeInfo, _In_ const ONNXSession* sess, size_t index, _Out_ struct OrtTypeInfo** out) { API_IMPL_BEGIN auto session = reinterpret_cast(sess); std::pair p = session->GetModelInputs(); if (!p.first.IsOK()) return ToONNXStatus(p.first); if (p.second->size() <= index) - return CreateONNXStatus(ONNXRUNTIME_FAIL, "out of index"); + return CreateONNXStatus(ORT_FAIL, "out of index"); const ONNX_NAMESPACE::TypeProto* type_proto = (*p.second)[index]->TypeAsProto(); - return ONNXRuntimeTypeInfo::FromDataTypeImpl(type_proto, out); + return OrtTypeInfo::FromDataTypeImpl(type_proto, out); API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInferenceSessionGetOutputTypeInfo, _In_ const ONNXSession* sess, size_t index, _Out_ struct ONNXRuntimeTypeInfo** out) { +ORT_API_STATUS_IMPL(OrtInferenceSessionGetOutputTypeInfo, _In_ const ONNXSession* sess, size_t index, _Out_ struct OrtTypeInfo** out) { API_IMPL_BEGIN auto session = reinterpret_cast(sess); std::pair p = session->GetModelOutputs(); if (!p.first.IsOK()) return ToONNXStatus(p.first); if (p.second->size() <= index) - return CreateONNXStatus(ONNXRUNTIME_FAIL, "out of index"); + return CreateONNXStatus(ORT_FAIL, "out of index"); const ONNX_NAMESPACE::TypeProto* type_proto = (*p.second)[index]->TypeAsProto(); - return ONNXRuntimeTypeInfo::FromDataTypeImpl(type_proto, out); + return OrtTypeInfo::FromDataTypeImpl(type_proto, out); API_IMPL_END } -static char* StrDup(const std::string& str, ONNXRuntimeAllocator* allocator) { +static char* StrDup(const std::string& str, OrtAllocator* allocator) { char* output_string = reinterpret_cast((*allocator)->Alloc(allocator, str.size() + 1)); memcpy(output_string, str.c_str(), str.size()); output_string[str.size()] = '\0'; @@ -575,27 +575,27 @@ static char* StrDup(const std::string& str, ONNXRuntimeAllocator* allocator) { } static ONNXStatus* GetInputOutputNameImpl(_In_ const ONNXSession* sess, size_t index, - _Inout_ ONNXRuntimeAllocator* allocator, bool is_input, + _Inout_ OrtAllocator* allocator, bool is_input, _Out_ char** output) { auto session = reinterpret_cast(sess); std::pair p = is_input ? session->GetModelInputs() : session->GetModelOutputs(); if (!p.first.IsOK()) return ToONNXStatus(p.first); if (p.second == nullptr) - return CreateONNXStatus(ONNXRUNTIME_FAIL, "internal error"); + return CreateONNXStatus(ORT_FAIL, "internal error"); const InputDefList& defs = *p.second; if (index >= defs.size()) - return CreateONNXStatus(ONNXRUNTIME_FAIL, "index out of range"); + return CreateONNXStatus(ORT_FAIL, "index out of range"); *output = StrDup(defs[index]->Name(), allocator); return nullptr; } -ONNXRUNTIME_API(int, ONNXRuntimeIsTensor, _In_ const ONNXValue* value) { +ORT_API(int, OrtIsTensor, _In_ const ONNXValue* value) { auto v = reinterpret_cast(value); return v->IsTensor() ? 
1 : 0; } -ONNXRUNTIME_API(void*, ONNXRuntimeAllocatorAlloc, _Inout_ ONNXRuntimeAllocator* ptr, size_t size) { +ORT_API(void*, OrtAllocatorAlloc, _Inout_ OrtAllocator* ptr, size_t size) { try { return (*ptr)->Alloc(ptr, size); } catch (std::exception&) { @@ -603,14 +603,14 @@ ONNXRUNTIME_API(void*, ONNXRuntimeAllocatorAlloc, _Inout_ ONNXRuntimeAllocator* } } -ONNXRUNTIME_API(void, ONNXRuntimeAllocatorFree, _Inout_ ONNXRuntimeAllocator* ptr, void* p) { +ORT_API(void, OrtAllocatorFree, _Inout_ OrtAllocator* ptr, void* p) { try { (*ptr)->Free(ptr, p); } catch (std::exception&) { } } -ONNXRUNTIME_API(const struct ONNXRuntimeAllocatorInfo*, ONNXRuntimeAllocatorGetInfo, _In_ const ONNXRuntimeAllocator* ptr) { +ORT_API(const struct OrtAllocatorInfo*, OrtAllocatorGetInfo, _In_ const OrtAllocator* ptr) { try { return (*ptr)->Info(ptr); } catch (std::exception&) { @@ -618,15 +618,15 @@ ONNXRUNTIME_API(const struct ONNXRuntimeAllocatorInfo*, ONNXRuntimeAllocatorGetI } } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInferenceSessionGetInputName, _In_ const ONNXSession* sess, size_t index, - _Inout_ ONNXRuntimeAllocator* allocator, _Out_ char** output) { +ORT_API_STATUS_IMPL(OrtInferenceSessionGetInputName, _In_ const ONNXSession* sess, size_t index, + _Inout_ OrtAllocator* allocator, _Out_ char** output) { API_IMPL_BEGIN return GetInputOutputNameImpl(sess, index, allocator, true, output); API_IMPL_END } -ONNXRUNTIME_API_STATUS_IMPL(ONNXRuntimeInferenceSessionGetOutputName, _In_ const ONNXSession* sess, size_t index, - _Inout_ ONNXRuntimeAllocator* allocator, _Out_ char** output) { +ORT_API_STATUS_IMPL(OrtInferenceSessionGetOutputName, _In_ const ONNXSession* sess, size_t index, + _Inout_ OrtAllocator* allocator, _Out_ char** output) { API_IMPL_BEGIN return GetInputOutputNameImpl(sess, index, allocator, false, output); API_IMPL_END @@ -636,6 +636,6 @@ DEFINE_RELEASE_ONNX_RUNTIME_OBJECT_FUNCTION(ONNXValue, MLValue) DEFINE_RELEASE_ONNX_RUNTIME_OBJECT_FUNCTION(ONNXSession, ::onnxruntime::InferenceSession) DEFINE_RELEASE_ONNX_RUNTIME_OBJECT_FUNCTION_FOR_ARRAY(ONNXStatus, char) -ONNXRUNTIME_API(void, ReleaseONNXEnv, ONNXRuntimeEnv* env) { - ONNXRuntimeReleaseObject(env); +ORT_API(void, ReleaseONNXEnv, OrtEnv* env) { + OrtReleaseObject(env); } diff --git a/onnxruntime/core/util/math_cpu.cc b/onnxruntime/core/util/math_cpu.cc index 9805a17410cac..b3b1fbcb70c4b 100644 --- a/onnxruntime/core/util/math_cpu.cc +++ b/onnxruntime/core/util/math_cpu.cc @@ -102,7 +102,7 @@ void Gemm( A, &lda, &beta, C, &N_); if (status != mkldnn_success) { - ONNXRUNTIME_THROW("mkldnn_sgemm failed with status: ", status); + ORT_THROW("mkldnn_sgemm failed with status: ", status); } #elif defined(USE_MLAS) int lda = (int)((TransA == CblasNoTrans) ? 
K : M); @@ -127,7 +127,7 @@ void Gemm( ConstEigenMatrixMap(A, K, M)); return; default: - ONNXRUNTIME_THROW("CblasNoTrans Unexpected CBLAS_TRANSPOSE for TransB of ", TransB); + ORT_THROW("CblasNoTrans Unexpected CBLAS_TRANSPOSE for TransB of ", TransB); } } case CblasTrans: { @@ -141,11 +141,11 @@ void Gemm( ConstEigenMatrixMap(A, M, K).transpose()); return; default: - ONNXRUNTIME_THROW("CblasTrans Unexpected CBLAS_TRANSPOSE for TransB of ", TransB); + ORT_THROW("CblasTrans Unexpected CBLAS_TRANSPOSE for TransB of ", TransB); } } default: - ONNXRUNTIME_THROW("Unexpected CBLAS_TRANSPOSE for TransA of ", TransA); + ORT_THROW("Unexpected CBLAS_TRANSPOSE for TransA of ", TransA); } #endif } @@ -175,7 +175,7 @@ void GemmEx( A, &lda, &beta, C, &ldc); if (status != mkldnn_success) { - ONNXRUNTIME_THROW("mkldnn_sgemm failed with status: ", status); + ORT_THROW("mkldnn_sgemm failed with status: ", status); } #elif defined(USE_MLAS) MlasSgemm(TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc); @@ -203,7 +203,7 @@ void GemmEx( ConstStridedMap(A, K, M, OuterStride(lda))); return; default: - ONNXRUNTIME_THROW("CblasNoTrans Unexpected CBLAS_TRANSPOSE for TransB of ", TransB); + ORT_THROW("CblasNoTrans Unexpected CBLAS_TRANSPOSE for TransB of ", TransB); } } case CblasTrans: { @@ -219,11 +219,11 @@ void GemmEx( ConstStridedMap(A, M, K, OuterStride(lda)).transpose()); return; default: - ONNXRUNTIME_THROW("CblasTrans Unexpected CBLAS_TRANSPOSE for TransB of ", TransB); + ORT_THROW("CblasTrans Unexpected CBLAS_TRANSPOSE for TransB of ", TransB); } } default: - ONNXRUNTIME_THROW("Unexpected CBLAS_TRANSPOSE for TransA of ", TransA); + ORT_THROW("Unexpected CBLAS_TRANSPOSE for TransA of ", TransA); } #endif } @@ -260,7 +260,7 @@ void Gemv( return; } default: - ONNXRUNTIME_THROW("Gemv float found an unexpected CBLAS_TRANSPOSE input of", TransA); + ORT_THROW("Gemv float found an unexpected CBLAS_TRANSPOSE input of", TransA); } } @@ -745,9 +745,9 @@ void RandUniform( CPUMathUtil* /*provider*/) { std::uniform_real_distribution distribution(a, b); //todo: need implmenet "RandGenerator()" in execution provider - ONNXRUNTIME_UNUSED_PARAMETER(n); - ONNXRUNTIME_UNUSED_PARAMETER(r); - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_UNUSED_PARAMETER(n); + ORT_UNUSED_PARAMETER(r); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); /*for (int i = 0; i < n; ++i) { r[i] = distribution(context->RandGenerator()); }*/ @@ -759,9 +759,9 @@ void RandUniform( CPUMathUtil* /*provider*/) { std::uniform_int_distribution distribution(a, b); //todo: need implmenet "RandGenerator()" in execution provider - ONNXRUNTIME_UNUSED_PARAMETER(n); - ONNXRUNTIME_UNUSED_PARAMETER(r); - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_UNUSED_PARAMETER(n); + ORT_UNUSED_PARAMETER(r); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); /*for (int i = 0; i < n; ++i) { r[i] = distribution(context->RandGenerator()); }*/ @@ -806,9 +806,9 @@ void RandGaussian( const int n, const float mean, const float std, float* r, CPUMathUtil* /*provider*/) { std::normal_distribution distribution(mean, std); - ONNXRUNTIME_UNUSED_PARAMETER(n); - ONNXRUNTIME_UNUSED_PARAMETER(r); - ONNXRUNTIME_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); + ORT_UNUSED_PARAMETER(n); + ORT_UNUSED_PARAMETER(r); + ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); /*for (int i = 0; i < n; ++i) { r[i] = distribution(context->RandGenerator()); }*/ @@ -850,7 +850,7 @@ void Select( float* y, CPUMathUtil* 
/*context*/) { for (int i = 0; i < N; ++i) { - ONNXRUNTIME_ENFORCE(idx[i] < D); + ORT_ENFORCE(idx[i] < D); y[i] = x[i * D + idx[i]]; } } @@ -916,7 +916,7 @@ void Im2colNd( incremented = false; for (int64_t d_i = N - 1; d_i >= 0; --d_i) { const int64_t d_max = col_shape[d_i + 1]; - ONNXRUNTIME_ENFORCE(d_iter[d_i] < d_max); + ORT_ENFORCE(d_iter[d_i] < d_max); if (d_iter[d_i] == d_max - 1) { d_iter[d_i] = 0; } else { // d_iter[d_i] < d_max - 1 diff --git a/onnxruntime/python/onnxruntime_pybind_state.cc b/onnxruntime/python/onnxruntime_pybind_state.cc index 331c080e77bd6..1db3d36f2d9fa 100644 --- a/onnxruntime/python/onnxruntime_pybind_state.cc +++ b/onnxruntime/python/onnxruntime_pybind_state.cc @@ -172,8 +172,8 @@ class SessionObjectInitializer { } }; -inline void RegisterExecutionProvider(InferenceSession* sess, ONNXRuntimeProviderFactoryInterface** f) { - ONNXRuntimeProvider* p; +inline void RegisterExecutionProvider(InferenceSession* sess, OrtProviderFactoryInterface** f) { + OrtProvider* p; (*f)->CreateProvider(f, &p); std::unique_ptr q((onnxruntime::IExecutionProvider*)p); auto status = sess->RegisterExecutionProvider(std::move(q)); @@ -183,14 +183,14 @@ inline void RegisterExecutionProvider(InferenceSession* sess, ONNXRuntimeProvide } #define FACTORY_PTR_HOLDER \ - std::unique_ptr ptr_holder_(f, ONNXRuntimeReleaseObject); + std::unique_ptr ptr_holder_(f, OrtReleaseObject); void InitializeSession(InferenceSession* sess) { onnxruntime::common::Status status; #ifdef USE_CUDA { - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateCUDAExecutionProviderFactory(0, &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateCUDAExecutionProviderFactory(0, &f)); RegisterExecutionProvider(sess, f); FACTORY_PTR_HOLDER; } @@ -199,16 +199,16 @@ void InitializeSession(InferenceSession* sess) { #ifdef USE_MKLDNN { const bool enable_cpu_mem_arena = true; - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateMkldnnExecutionProviderFactory(enable_cpu_mem_arena ? 1 : 0, &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateMkldnnExecutionProviderFactory(enable_cpu_mem_arena ? 1 : 0, &f)); RegisterExecutionProvider(sess, f); FACTORY_PTR_HOLDER; } #endif #if 0 //USE_NUPHAR { - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateNupharExecutionProviderFactory(0, "", &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateNupharExecutionProviderFactory(0, "", &f)); RegisterExecutionProvider(sess, f); FACTORY_PTR_HOLDER; } diff --git a/onnxruntime/test/common/logging/logging_test.cc b/onnxruntime/test/common/logging/logging_test.cc index b9b6ae49df472..828e2304827e9 100644 --- a/onnxruntime/test/common/logging/logging_test.cc +++ b/onnxruntime/test/common/logging/logging_test.cc @@ -49,7 +49,7 @@ class LoggingTestsFixture : public ::testing::Test { std::unique_ptr LoggingTestsFixture::default_logging_manager_; /// -/// Tests that the ONNXRUNTIME_WHERE macro populates all fields correctly. +/// Tests that the ORT_WHERE macro populates all fields correctly. 
/// TEST_F(LoggingTestsFixture, TestWhereMacro) { const std::string logid{"TestWhereMacro"}; diff --git a/onnxruntime/test/custom_op_shared_lib/test_custom_op.cc b/onnxruntime/test/custom_op_shared_lib/test_custom_op.cc index 7bfc16be70ff1..298c70fa2e2c5 100644 --- a/onnxruntime/test/custom_op_shared_lib/test_custom_op.cc +++ b/onnxruntime/test/custom_op_shared_lib/test_custom_op.cc @@ -31,7 +31,7 @@ class FooKernel : public OpKernel { } }; -ONNX_RUNTIME_EXPORT KernelsContainer* GetAllKernels() { +ORT_EXPORT KernelsContainer* GetAllKernels() { KernelsContainer* kc = new KernelsContainer; KernelDefBuilder def_builder; @@ -46,7 +46,7 @@ ONNX_RUNTIME_EXPORT KernelsContainer* GetAllKernels() { return kc; } -ONNX_RUNTIME_EXPORT SchemasContainer* GetAllSchemas() { +ORT_EXPORT SchemasContainer* GetAllSchemas() { SchemasContainer* sc = new SchemasContainer; sc->domain = onnxruntime::kOnnxDomain; sc->baseline_opset_version = 5; @@ -74,10 +74,10 @@ ONNX_RUNTIME_EXPORT SchemasContainer* GetAllSchemas() { return sc; } -ONNX_RUNTIME_EXPORT void FreeKernelsContainer(KernelsContainer* kc) { +ORT_EXPORT void FreeKernelsContainer(KernelsContainer* kc) { delete kc; } -ONNX_RUNTIME_EXPORT void FreeSchemasContainer(SchemasContainer* sc) { +ORT_EXPORT void FreeSchemasContainer(SchemasContainer* sc) { delete sc; } diff --git a/onnxruntime/test/framework/TestAllocatorManager.cc b/onnxruntime/test/framework/TestAllocatorManager.cc index 499ef7468c5cb..550e877252bb6 100644 --- a/onnxruntime/test/framework/TestAllocatorManager.cc +++ b/onnxruntime/test/framework/TestAllocatorManager.cc @@ -50,13 +50,13 @@ AllocatorManager::AllocatorManager() { Status AllocatorManager::InitializeAllocators() { auto cpu_alocator = std::make_unique(); - ONNXRUNTIME_RETURN_IF_ERROR(RegisterAllocator(map_, std::move(cpu_alocator), std::numeric_limits::max(), true)); + ORT_RETURN_IF_ERROR(RegisterAllocator(map_, std::move(cpu_alocator), std::numeric_limits::max(), true)); #ifdef USE_CUDA auto cuda_alocator = std::make_unique(0); - ONNXRUNTIME_RETURN_IF_ERROR(RegisterAllocator(map_, std::move(cuda_alocator), std::numeric_limits::max(), true)); + ORT_RETURN_IF_ERROR(RegisterAllocator(map_, std::move(cuda_alocator), std::numeric_limits::max(), true)); auto cuda_pinned_alocator = std::make_unique(); - ONNXRUNTIME_RETURN_IF_ERROR(RegisterAllocator(map_, std::move(cuda_pinned_alocator), std::numeric_limits::max(), true)); + ORT_RETURN_IF_ERROR(RegisterAllocator(map_, std::move(cuda_pinned_alocator), std::numeric_limits::max(), true)); #endif // USE_CUDA return Status::OK(); @@ -68,7 +68,7 @@ AllocatorManager::~AllocatorManager() { AllocatorPtr AllocatorManager::GetAllocator(const std::string& name, const int id, bool arena) { auto allocator_id = GetAllocatorId(name, id, arena); auto entry = map_.find(allocator_id); - ONNXRUNTIME_ENFORCE(entry != map_.end(), "Allocator not found:", allocator_id); + ORT_ENFORCE(entry != map_.end(), "Allocator not found:", allocator_id); return entry->second; } } // namespace test diff --git a/onnxruntime/test/framework/TestAllocatorManager.h b/onnxruntime/test/framework/TestAllocatorManager.h index 44e3cb926a2ab..6278d558e57bb 100644 --- a/onnxruntime/test/framework/TestAllocatorManager.h +++ b/onnxruntime/test/framework/TestAllocatorManager.h @@ -19,7 +19,7 @@ class AllocatorManager { AllocatorPtr GetAllocator(const std::string& name, const int id = 0, bool arena = true); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(AllocatorManager); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(AllocatorManager); 
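// --------------------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the ORT_DISALLOW_* macros renamed above
// (previously ONNXRUNTIME_DISALLOW_*) are assumed to expand to deleted special member
// functions roughly as follows; the authoritative definitions live in the runtime's
// common headers and may differ in detail.
#define ORT_DISALLOW_COPY(TypeName) TypeName(const TypeName&) = delete
#define ORT_DISALLOW_ASSIGNMENT(TypeName) TypeName& operator=(const TypeName&) = delete
#define ORT_DISALLOW_COPY_AND_ASSIGNMENT(TypeName) \
  ORT_DISALLOW_COPY(TypeName);                     \
  ORT_DISALLOW_ASSIGNMENT(TypeName)
#define ORT_DISALLOW_MOVE(TypeName) \
  TypeName(TypeName&&) = delete;    \
  TypeName& operator=(TypeName&&) = delete
#define ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TypeName) \
  ORT_DISALLOW_COPY_AND_ASSIGNMENT(TypeName);           \
  ORT_DISALLOW_MOVE(TypeName)

// Usage, as in the test header above: placed in a private section, the macro makes the
// class non-copyable and non-movable.
class ExampleManager {
 public:
  ExampleManager() = default;

 private:
  ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ExampleManager);
};
// --------------------------------------------------------------------------------------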
AllocatorManager(); Status InitializeAllocators(); diff --git a/onnxruntime/test/framework/allocation_planner_test.cc b/onnxruntime/test/framework/allocation_planner_test.cc index c4edb62da1d93..d30b3dd7fa22b 100644 --- a/onnxruntime/test/framework/allocation_planner_test.cc +++ b/onnxruntime/test/framework/allocation_planner_test.cc @@ -49,12 +49,12 @@ class DummyOpKernel : public OpKernel { public: DummyOpKernel(const OpKernelInfo& p) : OpKernel(p) {} Status Compute(OpKernelContext* context) const { - ONNXRUNTIME_UNUSED_PARAMETER(context); + ORT_UNUSED_PARAMETER(context); return Status::OK(); } Status ComputeAsync(OpKernelContext* context, DoneCallback done) const { - ONNXRUNTIME_UNUSED_PARAMETER(context); - ONNXRUNTIME_UNUSED_PARAMETER(done); + ORT_UNUSED_PARAMETER(context); + ORT_UNUSED_PARAMETER(done); return Status::OK(); } }; diff --git a/onnxruntime/test/framework/allocator_test.cc b/onnxruntime/test/framework/allocator_test.cc index 587f753b1b5f7..c0594514f2127 100644 --- a/onnxruntime/test/framework/allocator_test.cc +++ b/onnxruntime/test/framework/allocator_test.cc @@ -9,11 +9,11 @@ namespace onnxruntime { namespace test { TEST(AllocatorTest, CPUAllocatorTest) { - auto cpu_arena = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto cpu_arena = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); ASSERT_STREQ(cpu_arena->Info().name, CPU); EXPECT_EQ(cpu_arena->Info().id, 0); - EXPECT_EQ(cpu_arena->Info().type, ONNXRuntimeAllocatorType::ONNXRuntimeArenaAllocator); + EXPECT_EQ(cpu_arena->Info().type, OrtAllocatorType::OrtArenaAllocator); size_t size = 1024; auto bytes = cpu_arena->Alloc(size); @@ -48,8 +48,8 @@ class TestAllocator : public IAllocator { delete p_sizet; } - virtual const ONNXRuntimeAllocatorInfo& Info() const override { - static ONNXRuntimeAllocatorInfo info("test", ONNXRuntimeDeviceAllocator, 0); + virtual const OrtAllocatorInfo& Info() const override { + static OrtAllocatorInfo info("test", OrtDeviceAllocator, 0); return info; } diff --git a/onnxruntime/test/framework/cuda/allocator_cuda_test.cc b/onnxruntime/test/framework/cuda/allocator_cuda_test.cc index dc36bab3e81f8..75b433c045fdb 100644 --- a/onnxruntime/test/framework/cuda/allocator_cuda_test.cc +++ b/onnxruntime/test/framework/cuda/allocator_cuda_test.cc @@ -11,7 +11,7 @@ namespace onnxruntime { namespace test { TEST(AllocatorTest, CUDAAllocatorTest) { int cuda_device_id = 0; - DeviceAllocatorRegistrationInfo default_allocator_info({ONNXRuntimeMemTypeDefault, + DeviceAllocatorRegistrationInfo default_allocator_info({OrtMemTypeDefault, [](int id) { return std::make_unique(id); }, std::numeric_limits::max()}); auto cuda_arena = CreateAllocator(default_allocator_info, cuda_device_id); @@ -20,32 +20,32 @@ TEST(AllocatorTest, CUDAAllocatorTest) { EXPECT_STREQ(cuda_arena->Info().name, CUDA); EXPECT_EQ(cuda_arena->Info().id, cuda_device_id); - EXPECT_EQ(cuda_arena->Info().mem_type, ONNXRuntimeMemTypeDefault); - EXPECT_EQ(cuda_arena->Info().type, ONNXRuntimeArenaAllocator); + EXPECT_EQ(cuda_arena->Info().mem_type, OrtMemTypeDefault); + EXPECT_EQ(cuda_arena->Info().type, OrtArenaAllocator); //test cuda allocation auto cuda_addr = cuda_arena->Alloc(size); EXPECT_TRUE(cuda_addr); - DeviceAllocatorRegistrationInfo pinned_allocator_info({ONNXRuntimeMemTypeCPUOutput, + DeviceAllocatorRegistrationInfo pinned_allocator_info({OrtMemTypeCPUOutput, [](int) { return std::make_unique(); }, std::numeric_limits::max()}); auto pinned_allocator = CreateAllocator(pinned_allocator_info); 
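// --------------------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: a minimal round trip through the renamed
// allocator C API shown in this diff (OrtCreateDefaultAllocator, OrtAllocatorAlloc,
// OrtAllocatorGetInfo, OrtAllocatorFree, OrtReleaseObject). Error handling is reduced to
// an early return, and the public C API header declaring these entry points is assumed
// to be included by the build.
#include <cstdio>

int AllocatorRoundTripSketch() {
  OrtAllocator* allocator = nullptr;
  ONNXStatus* status = OrtCreateDefaultAllocator(&allocator);
  if (status != nullptr) {
    fprintf(stderr, "OrtCreateDefaultAllocator failed: %s\n", OrtGetErrorMessage(status));
    ReleaseONNXStatus(status);
    return -1;
  }

  // Allocate a small scratch buffer through the C API helpers.
  void* buffer = OrtAllocatorAlloc(allocator, 1024);

  const OrtAllocatorInfo* info = OrtAllocatorGetInfo(allocator);
  (void)info;  // carries the name/id/mem_type/type that the allocator tests here assert on

  OrtAllocatorFree(allocator, buffer);
  OrtReleaseObject(allocator);  // released like the other ref-counted handles in this diff
  return 0;
}
// --------------------------------------------------------------------------------------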
EXPECT_STREQ(pinned_allocator->Info().name, CUDA_PINNED); EXPECT_EQ(pinned_allocator->Info().id, 0); - EXPECT_EQ(pinned_allocator->Info().mem_type, ONNXRuntimeMemTypeCPUOutput); - EXPECT_EQ(pinned_allocator->Info().type, ONNXRuntimeArenaAllocator); + EXPECT_EQ(pinned_allocator->Info().mem_type, OrtMemTypeCPUOutput); + EXPECT_EQ(pinned_allocator->Info().type, OrtArenaAllocator); //test pinned allocation auto pinned_addr = pinned_allocator->Alloc(size); EXPECT_TRUE(pinned_addr); - const auto& cpu_arena = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + const auto& cpu_arena = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); EXPECT_STREQ(cpu_arena->Info().name, CPU); EXPECT_EQ(cpu_arena->Info().id, 0); - EXPECT_EQ(cpu_arena->Info().mem_type, ONNXRuntimeMemTypeDefault); - EXPECT_EQ(cpu_arena->Info().type, ONNXRuntimeArenaAllocator); + EXPECT_EQ(cpu_arena->Info().mem_type, OrtMemTypeDefault); + EXPECT_EQ(cpu_arena->Info().type, OrtArenaAllocator); auto cpu_addr_a = cpu_arena->Alloc(size); EXPECT_TRUE(cpu_addr_a); diff --git a/onnxruntime/test/framework/cuda/fence_cuda_test.cc b/onnxruntime/test/framework/cuda/fence_cuda_test.cc index 133e9efde4692..06945fb2d60b1 100644 --- a/onnxruntime/test/framework/cuda/fence_cuda_test.cc +++ b/onnxruntime/test/framework/cuda/fence_cuda_test.cc @@ -88,7 +88,7 @@ TEST(CUDAFenceTests, DISABLED_PartOnCPU) { ASSERT_TRUE(graph.Resolve().IsOK()); - auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); auto element_type = DataTypeImpl::GetType(); TensorShape shape({2, 2}); float data[4] = {-1, 2, 3, -4}; @@ -145,7 +145,7 @@ TEST(CUDAFenceTests, TileWithInitializer) { ASSERT_TRUE(graph.Resolve().IsOK()); ASSERT_TRUE(0 == CountCopyNodes(graph)); - auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); auto element_type = DataTypeImpl::GetType(); TensorShape shape({2, 2}); float data[4] = {-1, 2, 3, -4}; @@ -211,7 +211,7 @@ TEST(CUDAFenceTests, TileWithComputedInput) { ASSERT_TRUE(graph.Resolve().IsOK()); ASSERT_TRUE(0 == CountCopyNodes(graph)); - auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); auto element_type = DataTypeImpl::GetType(); TensorShape shape({2, 2}); float data[4] = {-1, 2, 3, -4}; diff --git a/onnxruntime/test/framework/data_types_test.cc b/onnxruntime/test/framework/data_types_test.cc index d6d0f8f98d791..2ebda7c4bb57c 100644 --- a/onnxruntime/test/framework/data_types_test.cc +++ b/onnxruntime/test/framework/data_types_test.cc @@ -69,23 +69,23 @@ using MyOpaqueMapCpp_2 = std::map; using MyOpaqueSeqCpp_1 = std::vector; using MyOpaqueSeqCpp_2 = std::vector; -ONNXRUNTIME_REGISTER_MAP(MyOpaqueMapCpp_1); -ONNXRUNTIME_REGISTER_MAP(MyOpaqueMapCpp_2); +ORT_REGISTER_MAP(MyOpaqueMapCpp_1); +ORT_REGISTER_MAP(MyOpaqueMapCpp_2); -ONNXRUNTIME_REGISTER_MAP(TestMapToMapInt64ToFloat); -ONNXRUNTIME_REGISTER_MAP(TestMapStringToVectorInt64); -ONNXRUNTIME_REGISTER_MAP(TestMapMLFloat16ToFloat); +ORT_REGISTER_MAP(TestMapToMapInt64ToFloat); +ORT_REGISTER_MAP(TestMapStringToVectorInt64); +ORT_REGISTER_MAP(TestMapMLFloat16ToFloat); -ONNXRUNTIME_REGISTER_SEQ(MyOpaqueSeqCpp_1); -ONNXRUNTIME_REGISTER_SEQ(MyOpaqueSeqCpp_2); 
-ONNXRUNTIME_REGISTER_SEQ(TestSequenceOfSequence); +ORT_REGISTER_SEQ(MyOpaqueSeqCpp_1); +ORT_REGISTER_SEQ(MyOpaqueSeqCpp_2); +ORT_REGISTER_SEQ(TestSequenceOfSequence); -ONNXRUNTIME_REGISTER_OPAQUE_TYPE(TestOpaqueType_1, TestOpaqueDomain_1, TestOpaqueName_1); -ONNXRUNTIME_REGISTER_OPAQUE_TYPE(TestOpaqueType_2, TestOpaqueDomain_2, TestOpaqueName_2); +ORT_REGISTER_OPAQUE_TYPE(TestOpaqueType_1, TestOpaqueDomain_1, TestOpaqueName_1); +ORT_REGISTER_OPAQUE_TYPE(TestOpaqueType_2, TestOpaqueDomain_2, TestOpaqueName_2); // Special cases -ONNXRUNTIME_REGISTER_OPAQUE_TYPE(TestOpaqueDomainOnly, TestOpaqueDomain_1, TestOpaqueEmpty); -ONNXRUNTIME_REGISTER_OPAQUE_TYPE(TestOpaqueNameOnly, TestOpaqueEmpty, TestOpaqueName_1); -ONNXRUNTIME_REGISTER_OPAQUE_TYPE(TestOpaqueNoNames, TestOpaqueEmpty, TestOpaqueEmpty); +ORT_REGISTER_OPAQUE_TYPE(TestOpaqueDomainOnly, TestOpaqueDomain_1, TestOpaqueEmpty); +ORT_REGISTER_OPAQUE_TYPE(TestOpaqueNameOnly, TestOpaqueEmpty, TestOpaqueName_1); +ORT_REGISTER_OPAQUE_TYPE(TestOpaqueNoNames, TestOpaqueEmpty, TestOpaqueEmpty); #define REGISTER_ONNX_PROTO(TYPE) \ { \ diff --git a/onnxruntime/test/framework/execution_frame_test.cc b/onnxruntime/test/framework/execution_frame_test.cc index b713272cd4eae..8125bcb0ddc32 100644 --- a/onnxruntime/test/framework/execution_frame_test.cc +++ b/onnxruntime/test/framework/execution_frame_test.cc @@ -81,7 +81,7 @@ TEST(ExecutionFrameTest, TensorAllocationTest) { TensorShape shape(std::vector{2, 3}); status = frame.AllocateTensorWithSelfOwnBuffer(start_index, DataTypeImpl::GetType(), - execution_providers.Get(xp_typ)->GetAllocator(0, ONNXRuntimeMemTypeDefault)->Info(), shape); + execution_providers.Get(xp_typ)->GetAllocator(0, OrtMemTypeDefault)->Info(), shape); EXPECT_TRUE(status.IsOK()) << status.ErrorMessage(); MLValue* p_ml_value = frame.GetMutableNodeInputOrOutputMLValue(0); @@ -116,7 +116,7 @@ TEST(ExecutionFrameTest, FeedInDataTest) { graph.AddNode("node1", "Clip", "Clip operator", ArgMap{&input_def}, ArgMap{&output_def}); graph.Resolve(); - auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); auto element_type = DataTypeImpl::GetType(); TensorShape shape({3, 2}); void* buffer = cpu_allocator->Alloc(element_type->Size() * shape.Size()); @@ -206,7 +206,7 @@ TEST(ExecutionFrameTest, MemPatternTest) { mlvalue_name_idx_map.Add("T2"); mlvalue_name_idx_map.Add("T3"); - auto cpu_allocator = execution_providers.Get(xp_type)->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto cpu_allocator = execution_providers.Get(xp_type)->GetAllocator(0, OrtMemTypeDefault); MLValue v1, v2, v3; CreateMLValue(cpu_allocator, diff --git a/onnxruntime/test/framework/float_16_test.cc b/onnxruntime/test/framework/float_16_test.cc index 0be65ea0b2f16..a3adb7414aefb 100644 --- a/onnxruntime/test/framework/float_16_test.cc +++ b/onnxruntime/test/framework/float_16_test.cc @@ -106,7 +106,7 @@ void RunSession(InferenceSession& session_object, std::vector& values_y) { // prepare inputs MLValue ml_value; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_x, values_x, &ml_value); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_x, values_x, &ml_value); NameMLValMap feeds; feeds.insert(std::make_pair("X", ml_value)); diff --git a/onnxruntime/test/framework/inference_session_test.cc b/onnxruntime/test/framework/inference_session_test.cc index 4a334b35ec2de..3ae9478c90c5c 
100644 --- a/onnxruntime/test/framework/inference_session_test.cc +++ b/onnxruntime/test/framework/inference_session_test.cc @@ -67,7 +67,7 @@ void RegisterOperatorKernels(std::function fn) { class FuseExecutionProvider : public IExecutionProvider { public: explicit FuseExecutionProvider() { - DeviceAllocatorRegistrationInfo device_info({ONNXRuntimeMemTypeDefault, + DeviceAllocatorRegistrationInfo device_info({OrtMemTypeDefault, [](int) { return std::make_unique(); }, std::numeric_limits::max()}); InsertAllocator(std::shared_ptr( std::make_unique(device_info.factory(0)))); @@ -101,8 +101,8 @@ class FuseExecutionProvider : public IExecutionProvider { } common::Status CopyTensor(const Tensor& src, Tensor& dst) const override { - ONNXRUNTIME_UNUSED_PARAMETER(src); - ONNXRUNTIME_UNUSED_PARAMETER(dst); + ORT_UNUSED_PARAMETER(src); + ORT_UNUSED_PARAMETER(dst); return Status::OK(); } @@ -174,7 +174,7 @@ void RunModel(InferenceSession& session_object, std::vector dims_mul_x = {3, 2}; std::vector values_mul_x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}; MLValue ml_value; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &ml_value); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &ml_value); NameMLValMap feeds; feeds.insert(std::make_pair("X", ml_value)); @@ -186,7 +186,7 @@ void RunModel(InferenceSession& session_object, if (is_preallocate_output_vec) { fetches.resize(output_names.size()); for (auto& elem : fetches) { - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &elem); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &elem); } } @@ -229,7 +229,7 @@ void RunModelWithBindingMatMul(InferenceSession& session_object, MLValue input_ml_value_B; std::vector dims_mul_x_B = {4, 3}; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x_B, values_mul_x, &input_ml_value_B); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x_B, values_mul_x, &input_ml_value_B); io_binding->BindInput("A", input_ml_value_A); io_binding->BindInput("B", input_ml_value_B); @@ -239,13 +239,13 @@ void RunModelWithBindingMatMul(InferenceSession& session_object, MLValue output_ml_value; if (is_preallocate_output_vec) { if (allocation_provider == kCpuExecutionProvider) { - AllocateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), expected_output_dims, &output_ml_value); + AllocateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), expected_output_dims, &output_ml_value); } else if (allocation_provider == kCudaExecutionProvider) { #ifdef USE_CUDA - AllocateMLValue(TestCudaExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), expected_output_dims, &output_ml_value); + AllocateMLValue(TestCudaExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), expected_output_dims, &output_ml_value); #endif } else { - ONNXRUNTIME_THROW("Unsupported provider"); + ORT_THROW("Unsupported provider"); } } io_binding->BindOutput("Y", output_ml_value); @@ -269,9 +269,9 @@ void RunModelWithBindingMatMul(InferenceSession& session_object, auto& rtensor = outputs.front().Get(); auto element_type = rtensor.DataType(); auto& shape = rtensor.Shape(); - auto cpu_allocator = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto cpu_allocator = 
TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); void* buffer = cpu_allocator->Alloc(element_type->Size() * shape.Size()); - ONNXRUNTIME_ENFORCE(buffer); + ORT_ENFORCE(buffer); std::unique_ptr cpu_tensor = std::make_unique(element_type, shape, buffer, @@ -324,7 +324,7 @@ TEST(InferenceSessionTests, DisableCPUArena) { RunModel(session_object, run_options); } -#ifdef ONNXRUNTIME_RUN_EXTERNAL_ONNX_TESTS +#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS static bool Compare(const InputDefList& f_arg, const InputDefList& s_arg) { if (f_arg.size() != s_arg.size()) { cout << "Sizes differ: f_arg size: " << f_arg.size() << " s_arg size: " << s_arg.size() << endl; @@ -705,7 +705,7 @@ TEST(InferenceSessionTests, TestIOBindingReuse) { MLValue ml_value1; vector v1{2.f}; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), {1}, v1, &ml_value1); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), {1}, v1, &ml_value1); io_binding->BindOutput("foo", ml_value1); ASSERT_TRUE(io_binding->GetOutputs().size() == 1); auto span = io_binding->GetOutputs()[0].Get().DataAsSpan(); @@ -716,7 +716,7 @@ TEST(InferenceSessionTests, TestIOBindingReuse) { MLValue ml_value2; vector v2{3.f}; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), {1}, v2, &ml_value2); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), {1}, v2, &ml_value2); io_binding->BindOutput("foo", ml_value2); ASSERT_TRUE(io_binding->GetOutputs().size() == 1); span = io_binding->GetOutputs()[0].Get().DataAsSpan(); @@ -742,7 +742,7 @@ TEST(InferenceSessionTests, InvalidInputTypeOfTensorElement) { std::vector dims_mul_x = {3, 2}; std::vector values_mul_x = {1, 2, 3, 4, 5, 6}; MLValue ml_value; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &ml_value); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &ml_value); NameMLValMap feeds; feeds.insert(std::make_pair("X", ml_value)); @@ -871,15 +871,15 @@ static common::Status RunOptionalInputTest(bool add_required_input, std::vector unknown_input_val = {20.f}; MLValue required_input_mlvalue; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims, required_input_val, &required_input_mlvalue); MLValue optional_input_mlvalue; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims, optional_input_val, &optional_input_mlvalue); MLValue unknown_input_mlvalue; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims, unknown_input_val, &unknown_input_mlvalue); NameMLValMap feeds; @@ -908,7 +908,7 @@ static common::Status RunOptionalInputTest(bool add_required_input, const auto& tensor = output.Get(); float output_value = *tensor.Data(); if (output_value != expected_value) { - status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "Output of ", output_value, " != ", expected_value); + status = ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Output of ", output_value, " != ", expected_value); } } @@ -983,11 +983,11 @@ TEST(ExecutionProviderTest, FunctionTest) { std::vector dims_mul_x = {3, 2}; std::vector values_mul_x = {1.0f, 2.0f, 3.0f, 4.0f, 
5.0f, 6.0f}; MLValue ml_value_x; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_x); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_x); MLValue ml_value_y; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_y); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_y); MLValue ml_value_z; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_z); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_z); NameMLValMap feeds; feeds.insert(std::make_pair("X", ml_value_x)); feeds.insert(std::make_pair("Y", ml_value_y)); @@ -1086,11 +1086,11 @@ TEST(ExecutionProviderTest, FunctionInlineTest) { std::vector dims_mul_x = {2, 2}; std::vector values_mul_x = {1.0f, 2.0f, 3.0f, 4.0f}; MLValue ml_value_x; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_x); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_x); MLValue ml_value_y; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_y); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_y); MLValue ml_value_z; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_z); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, &ml_value_z); NameMLValMap feeds; feeds.insert(std::make_pair("X", ml_value_x)); feeds.insert(std::make_pair("Y", ml_value_y)); diff --git a/onnxruntime/test/framework/local_kernel_registry_test.cc b/onnxruntime/test/framework/local_kernel_registry_test.cc index 1066ce6c0e0f7..796221d4bd358 100644 --- a/onnxruntime/test/framework/local_kernel_registry_test.cc +++ b/onnxruntime/test/framework/local_kernel_registry_test.cc @@ -198,7 +198,7 @@ void RunSession(InferenceSession& session_object, std::vector& values_y) { // prepare inputs MLValue ml_value; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_x, values_x, &ml_value); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_x, values_x, &ml_value); NameMLValMap feeds; feeds.insert(std::make_pair("X", ml_value)); diff --git a/onnxruntime/test/framework/op_kernel_test.cc b/onnxruntime/test/framework/op_kernel_test.cc index 15dc19e63375a..032b9a34c6fae 100644 --- a/onnxruntime/test/framework/op_kernel_test.cc +++ b/onnxruntime/test/framework/op_kernel_test.cc @@ -26,8 +26,8 @@ class XPUExecutionProvider : public IExecutionProvider { } Status CopyTensor(const Tensor& src, Tensor& dst) const override { - ONNXRUNTIME_UNUSED_PARAMETER(src); - ONNXRUNTIME_UNUSED_PARAMETER(dst); + ORT_UNUSED_PARAMETER(src); + ORT_UNUSED_PARAMETER(dst); return Status::OK(); } diff --git a/onnxruntime/test/framework/opaque_kernels_test.cc b/onnxruntime/test/framework/opaque_kernels_test.cc index 03348ec6fdb9c..0448bfa226d15 100644 --- a/onnxruntime/test/framework/opaque_kernels_test.cc +++ b/onnxruntime/test/framework/opaque_kernels_test.cc @@ 
-88,7 +88,7 @@ class SparseTensorSample final { extern const char kTestDomain[] = "ai.onnx"; extern const char kSparseTensorName[] = "SparseTensorSample"; -ONNXRUNTIME_REGISTER_OPAQUE_TYPE(SparseTensorSample, kTestDomain, kSparseTensorName); +ORT_REGISTER_OPAQUE_TYPE(SparseTensorSample, kTestDomain, kSparseTensorName); class OpaqueTypeTests : public testing::Test { public: @@ -113,7 +113,7 @@ class ConstructSparseTensor final : public OpKernel { ConstructSparseTensor(const OpKernelInfo& info) : OpKernel{info} {} Status Compute(OpKernelContext* ctx) const override { - ONNXRUNTIME_ENFORCE(ctx->InputCount() == 3, "Expecting 3 inputs"); + ORT_ENFORCE(ctx->InputCount() == 3, "Expecting 3 inputs"); const Tensor& values_tensor = *ctx->Input(0); const Tensor& indicies_tensor = *ctx->Input(1); @@ -123,13 +123,13 @@ class ConstructSparseTensor final : public OpKernel { // values const TensorShape& val_shape = values_tensor.Shape(); const TensorShape& ind_shape = indicies_tensor.Shape(); - ONNXRUNTIME_ENFORCE(val_shape.NumDimensions() == 1, "Expecting vectors"); - ONNXRUNTIME_ENFORCE(val_shape.NumDimensions() == ind_shape.NumDimensions()); + ORT_ENFORCE(val_shape.NumDimensions() == 1, "Expecting vectors"); + ORT_ENFORCE(val_shape.NumDimensions() == ind_shape.NumDimensions()); // Copy data. With some effort we could hold shallow copies of the input Tensors // but I will leave this for now. SparseTensorSample* output_sparse_tensor = ctx->Output(0); - ONNXRUNTIME_ENFORCE(output_sparse_tensor != nullptr); + ORT_ENFORCE(output_sparse_tensor != nullptr); output_sparse_tensor->Values().assign(values_tensor.Data(), values_tensor.Data() + val_shape[0]); output_sparse_tensor->Indicies().assign(indicies_tensor.Data(), @@ -152,14 +152,14 @@ class FetchSparseTensorShape final : public OpKernel { FetchSparseTensorShape(const OpKernelInfo& info) : OpKernel{info} {} Status Compute(OpKernelContext* ctx) const override { - ONNXRUNTIME_ENFORCE(ctx->InputCount() == 1, "Expecting a single SparseTensorSample input"); + ORT_ENFORCE(ctx->InputCount() == 1, "Expecting a single SparseTensorSample input"); const SparseTensorSample* sparse_input = ctx->Input(0); // Always a single dimension of 1 bc we are storing a single number const int64_t dims[1] = {1}; TensorShape output_shape(dims, 1); Tensor* sparse_shape = ctx->Output(0, output_shape); int64_t* shape_data = sparse_shape->MutableData(); - ONNXRUNTIME_ENFORCE(shape_data != nullptr); + ORT_ENFORCE(shape_data != nullptr); *shape_data = sparse_input->Size(); return Status::OK(); @@ -365,17 +365,17 @@ TEST_F(OpaqueTypeTests, RunModel) { std::vector values = {1, 2}; // prepare inputs MLValue ml_values; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), val_dims, values, &ml_values); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), val_dims, values, &ml_values); std::vector ind_dims = {2}; std::vector indicies = {1, 4}; MLValue ml_indicies; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), ind_dims, indicies, &ml_indicies); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), ind_dims, indicies, &ml_indicies); std::vector shape_dims = {1}; std::vector shape = {5}; MLValue ml_shape; - CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), shape_dims, shape, &ml_shape); + CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), shape_dims, shape, &ml_shape); NameMLValMap feeds; 
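// --------------------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the feed-preparation pattern these tests
// repeat after the rename, with the allocator now requested via OrtMemTypeDefault rather
// than ONNXRuntimeMemTypeDefault. NameMLValMap, MLValue, CreateMLValue and
// TestCPUExecutionProvider are the existing test utilities; the <float> template argument
// and the exact helper signature are assumptions made for readability here.
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

void PrepareFeedSketch(NameMLValMap& feeds) {
  std::vector<int64_t> dims = {3, 2};
  std::vector<float> values = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
  MLValue ml_value;
  CreateMLValue<float>(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault),
                       dims, values, &ml_value);
  feeds.insert(std::make_pair("X", ml_value));
}
// --------------------------------------------------------------------------------------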
feeds.insert(std::make_pair("sparse_values", ml_values)); diff --git a/onnxruntime/test/framework/session_state_test.cc b/onnxruntime/test/framework/session_state_test.cc index 3304636940629..b586749f28e03 100644 --- a/onnxruntime/test/framework/session_state_test.cc +++ b/onnxruntime/test/framework/session_state_test.cc @@ -21,12 +21,12 @@ class TestOpKernel : public OpKernel { public: TestOpKernel(const OpKernelInfo& p) : OpKernel(p) {} Status Compute(OpKernelContext* context) const { - ONNXRUNTIME_UNUSED_PARAMETER(context); + ORT_UNUSED_PARAMETER(context); return Status::OK(); } Status ComputeAsync(OpKernelContext* context, DoneCallback done) const { - ONNXRUNTIME_UNUSED_PARAMETER(context); - ONNXRUNTIME_UNUSED_PARAMETER(done); + ORT_UNUSED_PARAMETER(context); + ORT_UNUSED_PARAMETER(done); return Status::OK(); } }; diff --git a/onnxruntime/test/framework/tensor_test.cc b/onnxruntime/test/framework/tensor_test.cc index d545ae65c3bc3..04dc942a4463d 100644 --- a/onnxruntime/test/framework/tensor_test.cc +++ b/onnxruntime/test/framework/tensor_test.cc @@ -16,7 +16,7 @@ template void CPUTensorTest(std::vector dims, const int offset = 0) { //not own the buffer TensorShape shape(dims); - auto alloc = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto alloc = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); auto data = alloc->Alloc(sizeof(T) * (shape.Size() + offset)); EXPECT_TRUE(data); Tensor t(DataTypeImpl::GetType(), shape, data, alloc->Info(), nullptr, offset); @@ -126,7 +126,7 @@ TEST(TensorTest, CPUUInt64TensorOffsetTest) { TEST(TensorTest, EmptyTensorTest) { auto type = DataTypeImpl::GetType(); - Tensor t(type, TensorShape({1, 0}), nullptr, TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault)->Info()); + Tensor t(type, TensorShape({1, 0}), nullptr, TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault)->Info()); auto& shape = t.Shape(); EXPECT_EQ(shape.Size(), 0); EXPECT_EQ(t.DataType(), type); @@ -137,12 +137,12 @@ TEST(TensorTest, EmptyTensorTest) { auto& location = t.Location(); ASSERT_STREQ(location.name, CPU); EXPECT_EQ(location.id, 0); - EXPECT_EQ(location.type, ONNXRuntimeAllocatorType::ONNXRuntimeArenaAllocator); + EXPECT_EQ(location.type, OrtAllocatorType::OrtArenaAllocator); } TEST(TensorTest, TensorCopyAssignOpTest) { TensorShape shape({1, 2, 3}); - auto alloc = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto alloc = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); auto data = alloc->Alloc(sizeof(int) * shape.Size()); EXPECT_TRUE(data); Tensor t1(DataTypeImpl::GetType(), shape, data, alloc->Info()); @@ -152,7 +152,7 @@ TEST(TensorTest, TensorCopyAssignOpTest) { auto location = t2.Location(); ASSERT_STREQ(location.name, CPU); EXPECT_EQ(location.id, 0); - EXPECT_EQ(location.type, ONNXRuntimeAllocatorType::ONNXRuntimeArenaAllocator); + EXPECT_EQ(location.type, OrtAllocatorType::OrtArenaAllocator); auto t_data = t2.template Data(); EXPECT_EQ((void*)t_data, data); alloc->Free(data); @@ -167,7 +167,7 @@ TEST(TensorTest, StringTensorTest) { #endif { TensorShape shape({2, 3}); - auto alloc = TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault); + auto alloc = TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault); auto buffer = alloc->Alloc(sizeof(std::string) * (shape.Size())); Tensor t(DataTypeImpl::GetType(), shape, buffer, alloc->Info(), alloc); diff --git a/onnxruntime/test/framework/tensorprotoutils_test.cc 
b/onnxruntime/test/framework/tensorprotoutils_test.cc index 3898629a80809..12ae8bad1ef07 100644 --- a/onnxruntime/test/framework/tensorprotoutils_test.cc +++ b/onnxruntime/test/framework/tensorprotoutils_test.cc @@ -11,7 +11,7 @@ namespace onnxruntime { namespace test { -#ifdef ONNXRUNTIME_RUN_EXTERNAL_ONNX_TESTS +#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS TEST(TensorProtoUtilsTest, test1) { const char* filename = "../models/opset8/test_resnet50/test_data_set_0/input_0.pb"; int test_data_pb_fd; diff --git a/onnxruntime/test/ir/onnx_model_test.cc b/onnxruntime/test/ir/onnx_model_test.cc index 7939fadf8eac5..8c39718778893 100644 --- a/onnxruntime/test/ir/onnx_model_test.cc +++ b/onnxruntime/test/ir/onnx_model_test.cc @@ -13,7 +13,7 @@ using namespace onnxruntime; using namespace ONNX_NAMESPACE; namespace onnxruntime { namespace test { -#ifdef ONNXRUNTIME_RUN_EXTERNAL_ONNX_TESTS +#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS // Tests that Resolve() properly clears the state of topological sorted nodes, // inputs, outputs and valueInfo. // Assumes the graph passed in has been previously resolved. @@ -72,7 +72,7 @@ TEST(ONNXModelsTest, non_existing_model) { #endif } -#ifdef ONNXRUNTIME_RUN_EXTERNAL_ONNX_TESTS +#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS TEST(ONNXModelsTest1, bvlc_alexnet_1) { using ::google::protobuf::io::CodedInputStream; using ::google::protobuf::io::FileInputStream; diff --git a/onnxruntime/test/onnx/FixedCountFinishCallback.h b/onnxruntime/test/onnx/FixedCountFinishCallback.h index 73117105e20a0..947a651b6868e 100644 --- a/onnxruntime/test/onnx/FixedCountFinishCallback.h +++ b/onnxruntime/test/onnx/FixedCountFinishCallback.h @@ -13,7 +13,7 @@ class FixedCountFinishCallbackImpl { //remain tasks int s_; std::mutex m_; - ONNXRUNTIME_EVENT finish_event_; + ORT_EVENT finish_event_; bool failed = false; std::vector> results_; @@ -26,14 +26,14 @@ class FixedCountFinishCallbackImpl { } FixedCountFinishCallbackImpl(int s) : s_(s), results_(s) { - ONNXRUNTIME_ENFORCE(CreateOnnxRuntimeEvent(&finish_event_).IsOK()); + ORT_ENFORCE(CreateOnnxRuntimeEvent(&finish_event_).IsOK()); } ~FixedCountFinishCallbackImpl() { - if (finish_event_) ONNXRuntimeCloseEvent(finish_event_); + if (finish_event_) OrtCloseEvent(finish_event_); } - ::onnxruntime::common::Status fail(ONNXRUNTIME_CALLBACK_INSTANCE pci) { + ::onnxruntime::common::Status fail(ORT_CALLBACK_INSTANCE pci) { { std::lock_guard g(m_); failed = true; @@ -42,7 +42,7 @@ class FixedCountFinishCallbackImpl { return OnnxRuntimeSetEventWhenCallbackReturns(pci, finish_event_); } - ::onnxruntime::common::Status onFinished(size_t task_index, std::shared_ptr result, ONNXRUNTIME_CALLBACK_INSTANCE pci) { + ::onnxruntime::common::Status onFinished(size_t task_index, std::shared_ptr result, ORT_CALLBACK_INSTANCE pci) { int v; { std::lock_guard g(m_); @@ -61,7 +61,7 @@ class FixedCountFinishCallbackImpl { } //this function can only be invoked once bool wait() { - ONNXRUNTIME_ENFORCE(WaitAndCloseEvent(finish_event_).IsOK()); + ORT_ENFORCE(WaitAndCloseEvent(finish_event_).IsOK()); { std::lock_guard g(m_); finish_event_ = nullptr; diff --git a/onnxruntime/test/onnx/TestCase.cc b/onnxruntime/test/onnx/TestCase.cc index 3de3e0ade2110..7f7eb686ea39b 100644 --- a/onnxruntime/test/onnx/TestCase.cc +++ b/onnxruntime/test/onnx/TestCase.cc @@ -153,7 +153,7 @@ static int ExtractFileNo(const std::basic_string& name) { const CHAR_T* end = number_str.c_str(); long ret = MyStrtol(start, const_cast(&end), 10); if (end == start) { - ONNXRUNTIME_THROW("parse file name failed"); + ORT_THROW("parse 
file name failed"); } return static_cast(ret); } @@ -173,13 +173,13 @@ static Status SortTensorFileNames(std::vector> for (size_t i = 0; i != input_pb_files.size(); ++i) { int fileno = ExtractFileNo(GetLastComponent(input_pb_files[i])); if (static_cast(fileno) != i) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "illegal input file name:", ToMBString(input_pb_files[i])); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "illegal input file name:", ToMBString(input_pb_files[i])); } } return Status::OK(); } -Status LoopDataFile(int test_data_pb_fd, ONNXRuntimeAllocator* env, +Status LoopDataFile(int test_data_pb_fd, OrtAllocator* env, const std::vector value_info, std::unordered_map& name_data_map, std::ostringstream& oss) { google::protobuf::io::FileInputStream f(test_data_pb_fd); f.SetCloseOnDelete(true); @@ -225,7 +225,7 @@ Status LoopDataFile(int test_data_pb_fd, ONNXRuntimeAllocator* env, case proto::TraditionalMLData::kTensor: { ONNXValue* temp_value; std::string s = data.tensor().SerializeAsString(); - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeTensorProtoToONNXValue(env, s.data(), (int)s.size(), &temp_value)); + ORT_THROW_ON_ERROR(OrtTensorProtoToONNXValue(env, s.data(), (int)s.size(), &temp_value)); gvalue.reset(temp_value); is_tensor = true; } break; @@ -246,9 +246,9 @@ Status LoopDataFile(int test_data_pb_fd, ONNXRuntimeAllocator* env, break; } } - if (!st.IsOK()) return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "load the ", item_id, "-th item failed,", st.ErrorMessage()); + if (!st.IsOK()) return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "load the ", item_id, "-th item failed,", st.ErrorMessage()); if (!clean_eof) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "parse input file failed, has extra unparsed data"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "parse input file failed, has extra unparsed data"); } return Status::OK(); } @@ -273,7 +273,7 @@ class OnnxTestCase : public ITestCase { private: std::string test_case_name_; std::basic_string model_url_; - ONNXRuntimeAllocator* allocator; + OrtAllocator* allocator; std::vector debuginfo_strings; std::mutex m_; std::vector input_value_info_; @@ -302,10 +302,10 @@ class OnnxTestCase : public ITestCase { bool post_processing_; Status ParseModel(); Status ParseConfig(); - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OnnxTestCase); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OnnxTestCase); public: - OnnxTestCase(ONNXRuntimeAllocator* env, const std::string& test_case_name); + OnnxTestCase(OrtAllocator* env, const std::string& test_case_name); explicit OnnxTestCase(const std::string& test_case_name) : test_case_name_(test_case_name) {} Status GetPerSampleTolerance(double* value) override; Status GetRelativePerSampleTolerance(double* value) override; @@ -338,7 +338,7 @@ class OnnxTestCase : public ITestCase { Status OnnxTestCase::loadModelFile(const PATH_CHAR_TYPE* model_url, ONNX_NAMESPACE::ModelProto** model_pb) { int model_fd; - ONNXRUNTIME_RETURN_IF_ERROR(Env::Default().FileOpenRd(model_url, model_fd)); + ORT_RETURN_IF_ERROR(Env::Default().FileOpenRd(model_url, model_fd)); google::protobuf::io::FileInputStream f(model_fd); f.SetCloseOnDelete(true); ONNX_NAMESPACE::ModelProto* ret = new ONNX_NAMESPACE::ModelProto(); @@ -348,14 +348,14 @@ Status OnnxTestCase::loadModelFile(const PATH_CHAR_TYPE* model_url, ONNX_NAMESPA *model_pb = ret; return Status::OK(); } -ITestCase* CreateOnnxTestCase(ONNXRuntimeAllocator* ptr, const std::string& test_case_name) { +ITestCase* CreateOnnxTestCase(OrtAllocator* ptr, const std::string& test_case_name) { 
return new OnnxTestCase(ptr, test_case_name); } Status OnnxTestCase::GetPerSampleTolerance(double* value) { Status st = ParseConfig(); if (!st.IsOK()) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse test config failed:", st.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse test config failed:", st.ErrorMessage()); *value = per_sample_tolerance_; return Status::OK(); @@ -364,7 +364,7 @@ Status OnnxTestCase::GetPerSampleTolerance(double* value) { Status OnnxTestCase::GetRelativePerSampleTolerance(double* value) { Status st = ParseConfig(); if (!st.IsOK()) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse test config failed:", st.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse test config failed:", st.ErrorMessage()); *value = relative_per_sample_tolerance_; return Status::OK(); } @@ -372,7 +372,7 @@ Status OnnxTestCase::GetRelativePerSampleTolerance(double* value) { Status OnnxTestCase::GetPostProcessing(bool* value) { Status st = ParseConfig(); if (!st.IsOK()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse test config failed:", st.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse test config failed:", st.ErrorMessage()); } *value = post_processing_; return Status::OK(); @@ -381,7 +381,7 @@ Status OnnxTestCase::GetPostProcessing(bool* value) { Status OnnxTestCase::ParseConfig() { std::call_once(config_parsed_, [this]() { std::basic_string config_path = - ReplaceFilename>(model_url_, ONNXRUNTIME_TSTR("config.txt")); + ReplaceFilename>(model_url_, ORT_TSTR("config.txt")); int config_fd; auto st = Env::Default().FileOpenRd(config_path, config_fd); if (st.IsOK()) { @@ -451,12 +451,12 @@ static Status LoadTensors(const std::vector& pb_files, for (size_t i = 0; i != pb_files.size(); ++i) { int tensor_fd; auto st = Env::Default().FileOpenRd(pb_files.at(i), tensor_fd); - ONNXRUNTIME_RETURN_IF_ERROR(st); + ORT_RETURN_IF_ERROR(st); google::protobuf::io::FileInputStream f(tensor_fd); f.SetCloseOnDelete(true); ONNX_NAMESPACE::TensorProto tensor; if (!tensor.ParseFromZeroCopyStream(&f)) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "parse file '", ToMBString(pb_files.at(i)), "' failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "parse file '", ToMBString(pb_files.at(i)), "' failed"); } input_pbs->emplace_back(tensor); } @@ -469,10 +469,10 @@ Status OnnxTestCase::LoadTestData(ONNXSession* session, size_t id, std::unordere Status st = ParseModel(); if (!st.IsOK()) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse model failed:", st.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse model failed:", st.ErrorMessage()); PATH_STRING_TYPE test_data_pb = ConcatPathComponent( - test_data_dirs_[id], (is_input ? ONNXRUNTIME_TSTR("inputs.pb") : ONNXRUNTIME_TSTR("outputs.pb"))); + test_data_dirs_[id], (is_input ? 
ORT_TSTR("inputs.pb") : ORT_TSTR("outputs.pb"))); int test_data_pb_fd; st = Env::Default().FileOpenRd(test_data_pb, test_data_pb_fd); if (st.IsOK()) { //has an all-in-one input file @@ -487,8 +487,8 @@ Status OnnxTestCase::LoadTestData(ONNXSession* session, size_t id, std::unordere debuginfo_strings[id] = oss.str(); } if (!st.IsOK()) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse data file \"", ToMBString(test_data_pb), - "\" failed:", st.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse data file \"", ToMBString(test_data_pb), + "\" failed:", st.ErrorMessage()); return Status::OK(); } @@ -499,20 +499,20 @@ Status OnnxTestCase::LoadTestData(ONNXSession* session, size_t id, std::unordere if (filename[0] == '.') return true; if (f_type != FileType::TYPE_REG) return true; std::basic_string filename_str = filename; - if (!HasExtensionOf(filename_str, ONNXRUNTIME_TSTR("pb"))) return true; + if (!HasExtensionOf(filename_str, ORT_TSTR("pb"))) return true; const std::basic_string file_prefix = - is_input ? ONNXRUNTIME_TSTR("input_") : ONNXRUNTIME_TSTR("output_"); + is_input ? ORT_TSTR("input_") : ORT_TSTR("output_"); if (!filename_str.compare(0, file_prefix.length(), file_prefix.c_str())) { std::basic_string p = ConcatPathComponent(dir_path, filename_str); test_data_pb_files.push_back(p); } return true; }); - ONNXRUNTIME_RETURN_IF_ERROR(SortTensorFileNames(test_data_pb_files)); + ORT_RETURN_IF_ERROR(SortTensorFileNames(test_data_pb_files)); std::vector test_data_pbs; - ONNXRUNTIME_RETURN_IF_ERROR(LoadTensors(test_data_pb_files, &test_data_pbs)); - ONNXRUNTIME_RETURN_IF_ERROR(ConvertTestData(session, test_data_pbs, is_input, name_data_map)); + ORT_RETURN_IF_ERROR(LoadTensors(test_data_pb_files, &test_data_pbs)); + ORT_RETURN_IF_ERROR(ConvertTestData(session, test_data_pbs, is_input, name_data_map)); return Status::OK(); } @@ -531,18 +531,18 @@ Status OnnxTestCase::ConvertTestData(ONNXSession* session, const std::vectorFree(allocator, temp_name); @@ -553,11 +553,11 @@ Status OnnxTestCase::ConvertTestData(ONNXSession* session, const std::vector& name_data_map, bool is_input) = 0; virtual const PATH_CHAR_TYPE* GetModelUrl() const = 0; virtual const std::string& GetTestCaseName() const = 0; @@ -34,4 +34,4 @@ class ITestCase { virtual ::onnxruntime::common::Status GetPostProcessing(bool* value) = 0; }; -ITestCase* CreateOnnxTestCase(ONNXRuntimeAllocator* ptr, const std::string& test_case_name); +ITestCase* CreateOnnxTestCase(OrtAllocator* ptr, const std::string& test_case_name); diff --git a/onnxruntime/test/onnx/main.cc b/onnxruntime/test/onnx/main.cc index df1ed84854504..8f74bc5ea9a2d 100644 --- a/onnxruntime/test/onnx/main.cc +++ b/onnxruntime/test/onnx/main.cc @@ -45,7 +45,7 @@ int GetNumCpuCores() { SYSTEM_INFO sysInfo; GetSystemInfo(&sysInfo); if (sysInfo.dwNumberOfProcessors <= 0) { - ONNXRUNTIME_THROW("Fatal error: 0 count processors from GetSystemInfo"); + ORT_THROW("Fatal error: 0 count processors from GetSystemInfo"); } // This is the number of logical processors in the current group return sysInfo.dwNumberOfProcessors; @@ -57,7 +57,7 @@ int GetNumCpuCores() { ++processorCoreCount; } } - if (!processorCoreCount) ONNXRUNTIME_THROW("Fatal error: 0 count processors from GetLogicalProcessorInformation"); + if (!processorCoreCount) ORT_THROW("Fatal error: 0 count processors from GetLogicalProcessorInformation"); return processorCoreCount; } #else @@ -82,16 +82,16 @@ int real_main(int argc, char* argv[]) { bool enable_cuda = false; bool enable_mkl = 
false; bool enable_nuphar = false; - ONNXRuntimeLoggingLevel logging_level = ONNXRUNTIME_LOGGING_LEVEL_kWARNING; + OrtLoggingLevel logging_level = ORT_LOGGING_LEVEL_kWARNING; { int ch; - while ((ch = getopt(argc, argv, ONNXRUNTIME_TSTR("Ac:hj:m:n:r:e:xv"))) != -1) { + while ((ch = getopt(argc, argv, ORT_TSTR("Ac:hj:m:n:r:e:xv"))) != -1) { switch (ch) { case 'A': enable_cpu_mem_arena = false; break; case 'v': - logging_level = ONNXRUNTIME_LOGGING_LEVEL_kINFO; + logging_level = ORT_LOGGING_LEVEL_kINFO; break; case 'c': concurrent_session_runs = static_cast(MyStrtol(optarg, nullptr, 10)); @@ -123,13 +123,13 @@ int real_main(int argc, char* argv[]) { whitelisted_test_cases.emplace_back(optarg); break; case 'e': - if (!MyStrCmp(optarg, ONNXRUNTIME_TSTR("cpu"))) { + if (!MyStrCmp(optarg, ORT_TSTR("cpu"))) { //do nothing - } else if (!MyStrCmp(optarg, ONNXRUNTIME_TSTR("cuda"))) { + } else if (!MyStrCmp(optarg, ORT_TSTR("cuda"))) { enable_cuda = true; - } else if (!MyStrCmp(optarg, ONNXRUNTIME_TSTR("mkldnn"))) { + } else if (!MyStrCmp(optarg, ORT_TSTR("mkldnn"))) { enable_mkl = true; - } else if (!MyStrCmp(optarg, ONNXRUNTIME_TSTR("nuphar"))) { + } else if (!MyStrCmp(optarg, ORT_TSTR("nuphar"))) { enable_nuphar = true; } else { usage(); @@ -159,12 +159,12 @@ int real_main(int argc, char* argv[]) { usage(); return -1; } - std::unique_ptr env; + std::unique_ptr env; { - ONNXRuntimeEnv* t; - ONNXStatus* ost = ONNXRuntimeInitialize(logging_level, "Default", &t); + OrtEnv* t; + ONNXStatus* ost = OrtInitialize(logging_level, "Default", &t); if (ost != nullptr) { - fprintf(stderr, "Error creating environment: %s \n", ONNXRuntimeGetErrorMessage(ost)); + fprintf(stderr, "Error creating environment: %s \n", OrtGetErrorMessage(ost)); ReleaseONNXStatus(ost); return -1; } @@ -173,12 +173,12 @@ int real_main(int argc, char* argv[]) { std::vector > data_dirs; TestResultStat stat; - std::unique_ptr default_allocator; + std::unique_ptr default_allocator; { - ONNXRuntimeAllocator* p; - ONNXStatus* ost = ONNXRuntimeCreateDefaultAllocator(&p); + OrtAllocator* p; + ONNXStatus* ost = OrtCreateDefaultAllocator(&p); if (ost != nullptr) { - fprintf(stderr, "Error creating environment: %s \n", ONNXRuntimeGetErrorMessage(ost)); + fprintf(stderr, "Error creating environment: %s \n", OrtGetErrorMessage(ost)); ReleaseONNXStatus(ost); return -1; } @@ -200,10 +200,10 @@ int real_main(int argc, char* argv[]) { sf.DisableSequentialExecution(); if (enable_cuda) { #ifdef USE_CUDA - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateCUDAExecutionProviderFactory(0, &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateCUDAExecutionProviderFactory(0, &f)); sf.AppendExecutionProvider(f); - ONNXRuntimeReleaseObject(f); + OrtReleaseObject(f); #else fprintf(stderr, "CUDA is supported in this build"); return -1; @@ -211,10 +211,10 @@ int real_main(int argc, char* argv[]) { } if (enable_nuphar) { #ifdef USE_NUPHAR - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateNupharExecutionProviderFactory(0, "", &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateNupharExecutionProviderFactory(0, "", &f)); sf.AppendExecutionProvider(f); - ONNXRuntimeReleaseObject(f); + OrtReleaseObject(f); #else fprintf(stderr, "Nuphar is supported in this build"); return -1; @@ -222,10 +222,10 @@ int real_main(int argc, char* argv[]) { } if (enable_mkl) { #ifdef USE_MKLDNN - ONNXRuntimeProviderFactoryInterface** f; - 
ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateMkldnnExecutionProviderFactory(enable_cpu_mem_arena ? 1 : 0, &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateMkldnnExecutionProviderFactory(enable_cpu_mem_arena ? 1 : 0, &f)); sf.AppendExecutionProvider(f); - ONNXRuntimeReleaseObject(f); + OrtReleaseObject(f); #else fprintf(stderr, "MKL-DNN is supported in this build"); return -1; diff --git a/onnxruntime/test/onnx/onnxruntime_event.h b/onnxruntime/test/onnx/onnxruntime_event.h index bfbc65eb1d201..ed186a4d4c3d9 100644 --- a/onnxruntime/test/onnx/onnxruntime_event.h +++ b/onnxruntime/test/onnx/onnxruntime_event.h @@ -13,7 +13,7 @@ struct OnnxRuntimeEvent { OnnxRuntimeEvent() = default; private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OnnxRuntimeEvent); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OnnxRuntimeEvent); }; -using ONNXRUNTIME_EVENT = OnnxRuntimeEvent*; +using ORT_EVENT = OnnxRuntimeEvent*; diff --git a/onnxruntime/test/onnx/path_lib.h b/onnxruntime/test/onnx/path_lib.h index f693dcd149a53..9d3158229ef72 100644 --- a/onnxruntime/test/onnx/path_lib.h +++ b/onnxruntime/test/onnx/path_lib.h @@ -18,10 +18,10 @@ #ifdef _WIN32 typedef wchar_t PATH_CHAR_TYPE; -#define ONNXRUNTIME_TSTR(X) L##X +#define ORT_TSTR(X) L##X #else typedef char PATH_CHAR_TYPE; -#define ONNXRUNTIME_TSTR(X) (X) +#define ORT_TSTR(X) (X) #endif template diff --git a/onnxruntime/test/onnx/runner.cc b/onnxruntime/test/onnx/runner.cc index a7382d898e07b..7fac9800f13c7 100644 --- a/onnxruntime/test/onnx/runner.cc +++ b/onnxruntime/test/onnx/runner.cc @@ -25,14 +25,14 @@ using namespace onnxruntime; using ::onnxruntime::common::Status; -void ONNXRUNTIME_CALLBACK RunTestCase(ONNXRUNTIME_CALLBACK_INSTANCE pci, void* context, ONNXRUNTIME_WORK work) { +void ORT_CALLBACK RunTestCase(ORT_CALLBACK_INSTANCE pci, void* context, ORT_WORK work) { OnnxRuntimeCloseThreadpoolWork(work); assert(context != nullptr); TestCaseTask* task((TestCaseTask*)context); ITestCase* info = task->env.tests[task->task_id]; std::shared_ptr ret; try { - RunSingleTestCase(info, task->env.sf, task->concurrent_runs, task->repeat_count, task->pool, pci, [task](std::shared_ptr result, ONNXRUNTIME_CALLBACK_INSTANCE pci) { + RunSingleTestCase(info, task->env.sf, task->concurrent_runs, task->repeat_count, task->pool, pci, [task](std::shared_ptr result, ORT_CALLBACK_INSTANCE pci) { return OnTestCaseFinished(pci, task, result); }); return; @@ -49,7 +49,7 @@ void ONNXRUNTIME_CALLBACK RunTestCase(ONNXRUNTIME_CALLBACK_INSTANCE pci, void* c } } -void PTestRunner::Start(ONNXRUNTIME_CALLBACK_INSTANCE, size_t concurrent_runs) { +void PTestRunner::Start(ORT_CALLBACK_INSTANCE, size_t concurrent_runs) { concurrent_runs = std::min(std::max(1, concurrent_runs), c_->GetDataCount()); next_test_to_run = 0; for (size_t i = 0; i != concurrent_runs; ++i) { @@ -72,7 +72,7 @@ bool PTestRunner::ScheduleNew() { return true; } -void PTestRunner::OnTaskFinished(size_t, EXECUTE_RESULT, ONNXRUNTIME_CALLBACK_INSTANCE pci) noexcept { +void PTestRunner::OnTaskFinished(size_t, EXECUTE_RESULT, ORT_CALLBACK_INSTANCE pci) noexcept { try { ScheduleNew(); if (++finished == c_->GetDataCount()) { @@ -93,7 +93,7 @@ PTestRunner::PTestRunner(ONNXSession* session1, TestCaseCallBack on_finished1) : DataRunner(session1, c->GetTestCaseName(), c, on_finished1), next_test_to_run(0), finished(0), tpool_(tpool) { } -void ONNXRUNTIME_CALLBACK RunSingleDataItem(ONNXRUNTIME_CALLBACK_INSTANCE instance, void* context, ONNXRUNTIME_WORK work) { +void ORT_CALLBACK 
RunSingleDataItem(ORT_CALLBACK_INSTANCE instance, void* context, ORT_WORK work) { OnnxRuntimeCloseThreadpoolWork(work); DataTask* task((DataTask*)context); PTestRunner* env = task->env; @@ -102,7 +102,7 @@ void ONNXRUNTIME_CALLBACK RunSingleDataItem(ONNXRUNTIME_CALLBACK_INSTANCE instan env->RunTask(task_id, instance, true); } -Status OnTestCaseFinished(ONNXRUNTIME_CALLBACK_INSTANCE pci, TestCaseTask* task, std::shared_ptr result) { +Status OnTestCaseFinished(ORT_CALLBACK_INSTANCE pci, TestCaseTask* task, std::shared_ptr result) { FixedCountFinishCallback* finished = task->env.finished; auto task_id = task->task_id; bool failed = false; @@ -159,16 +159,16 @@ Status RunTests(TestEnv& env, int p_models, int concurrent_runs, size_t repeat_c }); std::vector> results; if (p_models > 1 && env.tests.size() > 1) { - ONNXRUNTIME_RETURN_IF_ERROR(ParallelRunTests(env, p_models, concurrent_runs, repeat_count, tpool)); + ORT_RETURN_IF_ERROR(ParallelRunTests(env, p_models, concurrent_runs, repeat_count, tpool)); results = env.finished->getResults(); } else { //run models one by one for (size_t i = 0; i != env.tests.size(); ++i) { const char* test_case_name = env.tests[i]->GetTestCaseName().c_str(); - ONNXRUNTIME_EVENT ev; - ONNXRUNTIME_RETURN_IF_ERROR(CreateOnnxRuntimeEvent(&ev)); + ORT_EVENT ev; + ORT_RETURN_IF_ERROR(CreateOnnxRuntimeEvent(&ev)); try { - RunSingleTestCase(env.tests[i], env.sf, concurrent_runs, repeat_count, tpool, nullptr, [repeat_count, &results, ev, concurrent_runs, test_case_name](std::shared_ptr result, ONNXRUNTIME_CALLBACK_INSTANCE pci) { + RunSingleTestCase(env.tests[i], env.sf, concurrent_runs, repeat_count, tpool, nullptr, [repeat_count, &results, ev, concurrent_runs, test_case_name](std::shared_ptr result, ORT_CALLBACK_INSTANCE pci) { //TODO:output this information to a xml if (concurrent_runs == 1) { TIME_SPEC ts = result->GetSpentTime(); @@ -179,14 +179,14 @@ Status RunTests(TestEnv& env, int p_models, int concurrent_runs, size_t repeat_c results.push_back(result); return OnnxRuntimeSetEventWhenCallbackReturns(pci, ev); }); - ONNXRUNTIME_RETURN_IF_ERROR(WaitAndCloseEvent(ev)); + ORT_RETURN_IF_ERROR(WaitAndCloseEvent(ev)); } catch (std::exception& ex) { LOGF_DEFAULT(ERROR, "Test %s failed:%s", test_case_name, ex.what()); std::string node_name; (void)env.tests[i]->GetNodeName(&node_name); results.push_back( std::make_shared(env.tests[i]->GetDataCount(), EXECUTE_RESULT::WITH_EXCEPTION, node_name)); - ONNXRuntimeCloseEvent(ev); + OrtCloseEvent(ev); } } } @@ -235,7 +235,7 @@ Status RunTests(TestEnv& env, int p_models, int concurrent_runs, size_t repeat_c if (!r.node_name.empty()) stat.AddFailedKernels(r.node_name); break; default: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "unknown result"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "unknown result"); } } } @@ -244,7 +244,7 @@ Status RunTests(TestEnv& env, int p_models, int concurrent_runs, size_t repeat_c std::vector LoadTests(const std::vector>& input_paths, const std::vector>& whitelisted_test_cases, - ONNXRuntimeAllocator* env) { + OrtAllocator* env) { std::vector tests; std::vector> paths(input_paths); while (!paths.empty()) { @@ -259,10 +259,10 @@ std::vector LoadTests(const std::vector filename_str = filename; - if (!HasExtensionOf(filename_str, ONNXRUNTIME_TSTR("onnx"))) return true; + if (!HasExtensionOf(filename_str, ORT_TSTR("onnx"))) return true; std::basic_string test_case_name = my_dir_name; - if (test_case_name.compare(0, 5, ONNXRUNTIME_TSTR("test_")) == 0) test_case_name = test_case_name.substr(5); + if 
(test_case_name.compare(0, 5, ORT_TSTR("test_")) == 0) test_case_name = test_case_name.substr(5); if (!whitelisted_test_cases.empty() && std::find(whitelisted_test_cases.begin(), whitelisted_test_cases.end(), test_case_name) == whitelisted_test_cases.end()) { return true; } @@ -287,14 +287,14 @@ SeqTestRunner::SeqTestRunner(ONNXSession* session1, TestCaseCallBack on_finished1) : DataRunner(session1, c->GetTestCaseName(), c, on_finished1), repeat_count_(repeat_count) { } -DataRunner::DataRunner(ONNXSession* session1, const std::string& test_case_name1, ITestCase* c, TestCaseCallBack on_finished1) : test_case_name_(test_case_name1), c_(c), session(session1), on_finished(on_finished1), default_allocator(MockedONNXRuntimeAllocator::Create()) { +DataRunner::DataRunner(ONNXSession* session1, const std::string& test_case_name1, ITestCase* c, TestCaseCallBack on_finished1) : test_case_name_(test_case_name1), c_(c), session(session1), on_finished(on_finished1), default_allocator(MockedOrtAllocator::Create()) { std::string s; c->GetNodeName(&s); result = std::make_shared(c->GetDataCount(), EXECUTE_RESULT::UNKNOWN_ERROR, s); SetTimeSpecToZero(&spent_time_); } -void DataRunner::RunTask(size_t task_id, ONNXRUNTIME_CALLBACK_INSTANCE pci, bool store_result) { +void DataRunner::RunTask(size_t task_id, ORT_CALLBACK_INSTANCE pci, bool store_result) { EXECUTE_RESULT res = EXECUTE_RESULT::UNKNOWN_ERROR; try { res = RunTaskImpl(task_id); @@ -322,11 +322,11 @@ EXECUTE_RESULT DataRunner::RunTaskImpl(size_t task_id) { // Create output feed size_t output_count; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeInferenceSessionGetOutputCount(session, &output_count)); + ORT_THROW_ON_ERROR(OrtInferenceSessionGetOutputCount(session, &output_count)); std::vector output_names(output_count); for (size_t i = 0; i != output_count; ++i) { char* output_name = nullptr; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeInferenceSessionGetOutputName(session, i, default_allocator, &output_name)); + ORT_THROW_ON_ERROR(OrtInferenceSessionGetOutputName(session, i, default_allocator, &output_name)); assert(output_name != nullptr); output_names[i] = output_name; (*default_allocator)->Free(default_allocator, output_name); @@ -348,9 +348,9 @@ EXECUTE_RESULT DataRunner::RunTaskImpl(size_t task_id) { for (size_t i = 0; i != output_count; ++i) { output_names_raw_ptr[i] = output_names[i].c_str(); } - auto onnx_status = ONNXRuntimeRunInference(session, nullptr, input_names.data(), input_values.data(), input_index, output_names_raw_ptr.data(), output_count, output_values.data()); + auto onnx_status = OrtRunInference(session, nullptr, input_names.data(), input_values.data(), input_index, output_names_raw_ptr.data(), output_count, output_values.data()); if (onnx_status != nullptr) { - std::string onnx_runtime_error_message = ONNXRuntimeGetErrorMessage(onnx_status); + std::string onnx_runtime_error_message = OrtGetErrorMessage(onnx_status); ReleaseONNXStatus(onnx_status); for (auto& kvp : feeds) { ReleaseONNXValue(kvp.second); @@ -469,7 +469,7 @@ EXECUTE_RESULT DataRunner::RunTaskImpl(size_t task_id) { return res; } -void SeqTestRunner::Start(ONNXRUNTIME_CALLBACK_INSTANCE pci, size_t) { +void SeqTestRunner::Start(ORT_CALLBACK_INSTANCE pci, size_t) { const size_t data_count = c_->GetDataCount(); for (size_t idx_repeat = 0; idx_repeat != repeat_count_; ++idx_repeat) for (size_t idx_data = 0; idx_data != data_count; ++idx_data) { @@ -478,7 +478,7 @@ void SeqTestRunner::Start(ONNXRUNTIME_CALLBACK_INSTANCE pci, size_t) { finish(pci); } -void 
RunSingleTestCase(ITestCase* info, const onnxruntime::SessionOptionsWrapper& sf, size_t concurrent_runs, size_t repeat_count, PThreadPool tpool, ONNXRUNTIME_CALLBACK_INSTANCE pci, TestCaseCallBack on_finished) { +void RunSingleTestCase(ITestCase* info, const onnxruntime::SessionOptionsWrapper& sf, size_t concurrent_runs, size_t repeat_count, PThreadPool tpool, ORT_CALLBACK_INSTANCE pci, TestCaseCallBack on_finished) { std::shared_ptr ret; size_t data_count = info->GetDataCount(); { @@ -493,7 +493,7 @@ void RunSingleTestCase(ITestCase* info, const onnxruntime::SessionOptionsWrapper auto sf2 = sf.clone(); sf2.SetSessionLogId(info->GetTestCaseName().c_str()); std::unique_ptr session_object( - sf2.ONNXRuntimeCreateInferenceSession(info->GetModelUrl()), ReleaseONNXSession); + sf2.OrtCreateInferenceSession(info->GetModelUrl()), ReleaseONNXSession); LOGF_DEFAULT(INFO, "testing %s\n", info->GetTestCaseName().c_str()); //temp hack. Because we have no resource control. We may not have enough memory to run this test in parallel if (info->GetTestCaseName() == "coreml_FNS-Candy_ImageNet") diff --git a/onnxruntime/test/onnx/runner.h b/onnxruntime/test/onnx/runner.h index 738d76480b010..9691e52e3844b 100644 --- a/onnxruntime/test/onnx/runner.h +++ b/onnxruntime/test/onnx/runner.h @@ -15,7 +15,7 @@ #include "testenv.h" #include "sync_api.h" -typedef std::function<::onnxruntime::common::Status(std::shared_ptr result, ONNXRUNTIME_CALLBACK_INSTANCE pci)> TestCaseCallBack; +typedef std::function<::onnxruntime::common::Status(std::shared_ptr result, ORT_CALLBACK_INSTANCE pci)> TestCaseCallBack; struct TestCaseTask { TestEnv& env; @@ -26,10 +26,10 @@ struct TestCaseTask { const PThreadPool pool; }; -void ONNXRUNTIME_CALLBACK RunTestCase(ONNXRUNTIME_CALLBACK_INSTANCE instance, void* context, ONNXRUNTIME_WORK work); +void ORT_CALLBACK RunTestCase(ORT_CALLBACK_INSTANCE instance, void* context, ORT_WORK work); //TODO: implement this function for Linux -void ONNXRUNTIME_CALLBACK RunSingleDataItem(ONNXRUNTIME_CALLBACK_INSTANCE instance, void* context, ONNXRUNTIME_WORK work); -::onnxruntime::common::Status OnTestCaseFinished(ONNXRUNTIME_CALLBACK_INSTANCE pci, TestCaseTask* task, std::shared_ptr result); +void ORT_CALLBACK RunSingleDataItem(ORT_CALLBACK_INSTANCE instance, void* context, ORT_WORK work); +::onnxruntime::common::Status OnTestCaseFinished(ORT_CALLBACK_INSTANCE pci, TestCaseTask* task, std::shared_ptr result); class DataRunner { protected: @@ -43,22 +43,22 @@ class DataRunner { private: ONNXSession* session; CALL_BACK on_finished; - ONNXRuntimeAllocatorInteface** const default_allocator; + OrtAllocatorInterface** const default_allocator; EXECUTE_RESULT RunTaskImpl(size_t task_id); - ONNXRUNTIME_DISALLOW_COPY_AND_ASSIGNMENT(DataRunner); + ORT_DISALLOW_COPY_AND_ASSIGNMENT(DataRunner); public: DataRunner(ONNXSession* session1, const std::string& test_case_name1, ITestCase* c, TestCaseCallBack on_finished1); - virtual void OnTaskFinished(size_t task_id, EXECUTE_RESULT res, ONNXRUNTIME_CALLBACK_INSTANCE pci) noexcept = 0; - void RunTask(size_t task_id, ONNXRUNTIME_CALLBACK_INSTANCE pci, bool store_result); + virtual void OnTaskFinished(size_t task_id, EXECUTE_RESULT res, ORT_CALLBACK_INSTANCE pci) noexcept = 0; + void RunTask(size_t task_id, ORT_CALLBACK_INSTANCE pci, bool store_result); virtual ~DataRunner() { ReleaseONNXSession(session); - ONNXRuntimeReleaseObject(default_allocator); + OrtReleaseObject(default_allocator); } - virtual void Start(ONNXRUNTIME_CALLBACK_INSTANCE pci, size_t concurrent_runs) = 0; + 
virtual void Start(ORT_CALLBACK_INSTANCE pci, size_t concurrent_runs) = 0; - void finish(ONNXRUNTIME_CALLBACK_INSTANCE pci) { + void finish(ORT_CALLBACK_INSTANCE pci) { std::shared_ptr res = result; CALL_BACK callback = on_finished; res->SetSpentTime(spent_time_); @@ -102,18 +102,18 @@ class SeqTestRunner : public DataRunner { ITestCase* c, size_t repeat_count, TestCaseCallBack on_finished1); - void Start(ONNXRUNTIME_CALLBACK_INSTANCE pci, size_t concurrent_runs) override; - void OnTaskFinished(size_t, EXECUTE_RESULT, ONNXRUNTIME_CALLBACK_INSTANCE) noexcept override {} + void Start(ORT_CALLBACK_INSTANCE pci, size_t concurrent_runs) override; + void OnTaskFinished(size_t, EXECUTE_RESULT, ORT_CALLBACK_INSTANCE) noexcept override {} }; class PTestRunner : public DataRunner { private: std::atomic next_test_to_run; std::atomic finished; - void OnTaskFinished(size_t task_id, EXECUTE_RESULT res, ONNXRUNTIME_CALLBACK_INSTANCE pci) noexcept override; + void OnTaskFinished(size_t task_id, EXECUTE_RESULT res, ORT_CALLBACK_INSTANCE pci) noexcept override; public: - void Start(ONNXRUNTIME_CALLBACK_INSTANCE pci, size_t concurrent_runs) override; + void Start(ORT_CALLBACK_INSTANCE pci, size_t concurrent_runs) override; PTestRunner(ONNXSession* session1, ITestCase* c, PThreadPool tpool, @@ -131,8 +131,8 @@ struct DataTask { std::vector LoadTests(const std::vector>& input_paths, const std::vector>& whitelisted_test_cases, - ONNXRuntimeAllocator* env); + OrtAllocator* env); //Do not run this function in the thread pool passed in ::onnxruntime::common::Status RunTests(TestEnv& env, int p_models, int concurrent_runs, size_t repeat_count, PThreadPool tpool); EXECUTE_RESULT StatusCodeToExecuteResult(int input); -void RunSingleTestCase(ITestCase* info, const onnxruntime::SessionOptionsWrapper& sf, size_t concurrent_runs, size_t repeat_count, PThreadPool tpool, ONNXRUNTIME_CALLBACK_INSTANCE pci, TestCaseCallBack on_finished); +void RunSingleTestCase(ITestCase* info, const onnxruntime::SessionOptionsWrapper& sf, size_t concurrent_runs, size_t repeat_count, PThreadPool tpool, ORT_CALLBACK_INSTANCE pci, TestCaseCallBack on_finished); diff --git a/onnxruntime/test/onnx/sync_api.h b/onnxruntime/test/onnx/sync_api.h index 00c9e83a6592a..7bc8db4d1a7f9 100644 --- a/onnxruntime/test/onnx/sync_api.h +++ b/onnxruntime/test/onnx/sync_api.h @@ -13,31 +13,31 @@ #include #ifdef _WIN32 -using ONNXRUNTIME_CALLBACK_INSTANCE = PTP_CALLBACK_INSTANCE; -using ONNXRUNTIME_EVENT = HANDLE; -#define ONNXRUNTIME_CALLBACK __stdcall -using ONNXRUNTIME_WORK = PTP_WORK; +using ORT_CALLBACK_INSTANCE = PTP_CALLBACK_INSTANCE; +using ORT_EVENT = HANDLE; +#define ORT_CALLBACK __stdcall +using ORT_WORK = PTP_WORK; using PThreadPool = PTP_CALLBACK_ENVIRON; -using ONNXRUNTIME_CALLBACK_FUNCTION = PTP_WORK_CALLBACK; +using ORT_CALLBACK_FUNCTION = PTP_WORK_CALLBACK; #define OnnxRuntimeCloseThreadpoolWork CloseThreadpoolWork inline PThreadPool GetDefaultThreadPool(const ::onnxruntime::Env&) { return nullptr; } #else -#define ONNXRUNTIME_CALLBACK +#define ORT_CALLBACK namespace Eigen { class ThreadPoolInterface; } using PThreadPool = Eigen::ThreadPoolInterface*; -#define ONNXRUNTIME_WORK void* +#define ORT_WORK void* struct OnnxRuntimeEvent; -using ONNXRUNTIME_EVENT = OnnxRuntimeEvent*; +using ORT_EVENT = OnnxRuntimeEvent*; class OnnxRuntimeCallbackInstance; -using ONNXRUNTIME_CALLBACK_INSTANCE = OnnxRuntimeCallbackInstance*; -using ONNXRUNTIME_CALLBACK_FUNCTION = void ONNXRUNTIME_CALLBACK (*)(ONNXRUNTIME_CALLBACK_INSTANCE pci, void* context, 
ONNXRUNTIME_WORK work); +using ORT_CALLBACK_INSTANCE = OnnxRuntimeCallbackInstance*; +using ORT_CALLBACK_FUNCTION = void ORT_CALLBACK (*)(ORT_CALLBACK_INSTANCE pci, void* context, ORT_WORK work); //Do nothing -inline void OnnxRuntimeCloseThreadpoolWork(ONNXRUNTIME_WORK) {} +inline void OnnxRuntimeCloseThreadpoolWork(ORT_WORK) {} #endif //The returned value will be used with CreateAndSubmitThreadpoolWork function @@ -45,9 +45,9 @@ PThreadPool GetDefaultThreadPool(const ::onnxruntime::Env& env); //On Windows, the last parameter can be null, in that case it will use the default thread pool. //On Linux, there is no per process default thread pool. You have to pass a non-null pointer. //Caller must delete the data pointer if this function returns a non-ok status. Otherwise, the ownership is transferred -::onnxruntime::common::Status CreateAndSubmitThreadpoolWork(ONNXRUNTIME_CALLBACK_FUNCTION callback, void* data, PThreadPool pool); -::onnxruntime::common::Status CreateOnnxRuntimeEvent(ONNXRUNTIME_EVENT* out); +::onnxruntime::common::Status CreateAndSubmitThreadpoolWork(ORT_CALLBACK_FUNCTION callback, void* data, PThreadPool pool); +::onnxruntime::common::Status CreateOnnxRuntimeEvent(ORT_EVENT* out); //pci is a pointer, can be NULL. If pci is NULL, signal the event immediately -::onnxruntime::common::Status OnnxRuntimeSetEventWhenCallbackReturns(ONNXRUNTIME_CALLBACK_INSTANCE pci, ONNXRUNTIME_EVENT finish_event); -::onnxruntime::common::Status WaitAndCloseEvent(ONNXRUNTIME_EVENT finish_event); -void ONNXRuntimeCloseEvent(ONNXRUNTIME_EVENT finish_event); +::onnxruntime::common::Status OnnxRuntimeSetEventWhenCallbackReturns(ORT_CALLBACK_INSTANCE pci, ORT_EVENT finish_event); +::onnxruntime::common::Status WaitAndCloseEvent(ORT_EVENT finish_event); +void OrtCloseEvent(ORT_EVENT finish_event); diff --git a/onnxruntime/test/onnx/sync_api_linux.cc b/onnxruntime/test/onnx/sync_api_linux.cc index c35ed430284ce..591b6c292ca70 100644 --- a/onnxruntime/test/onnx/sync_api_linux.cc +++ b/onnxruntime/test/onnx/sync_api_linux.cc @@ -15,14 +15,14 @@ using onnxruntime::common::Status; //OnnxRuntimeSetEventWhenCallbackReturns class OnnxRuntimeCallbackInstance { private: - std::vector events_to_signal_; + std::vector events_to_signal_; public: - void AddEvent(ONNXRUNTIME_EVENT event); + void AddEvent(ORT_EVENT event); onnxruntime::common::Status SignalAllEvents(); }; -Status WaitAndCloseEvent(ONNXRUNTIME_EVENT finish_event) { +Status WaitAndCloseEvent(ORT_EVENT finish_event) { if (finish_event == nullptr) return Status(onnxruntime::common::ONNXRUNTIME, onnxruntime::common::INVALID_ARGUMENT, ""); pthread_mutex_lock(&finish_event->finish_event_mutex); @@ -34,7 +34,7 @@ Status WaitAndCloseEvent(ONNXRUNTIME_EVENT finish_event) { return Status::OK(); } -Status CreateAndSubmitThreadpoolWork(ONNXRUNTIME_CALLBACK_FUNCTION callback, void* data, PThreadPool pool) { +Status CreateAndSubmitThreadpoolWork(ORT_CALLBACK_FUNCTION callback, void* data, PThreadPool pool) { if (callback == nullptr) return Status(onnxruntime::common::ONNXRUNTIME, onnxruntime::common::INVALID_ARGUMENT, "callback cannot be NULL"); if (pool == nullptr) @@ -63,52 +63,52 @@ PThreadPool GetDefaultThreadPool(const onnxruntime::Env& env) { return default_pool.get(); } -Status OnnxRuntimeSetEventWhenCallbackReturns(ONNXRUNTIME_CALLBACK_INSTANCE pci, ONNXRUNTIME_EVENT finish_event) { +Status OnnxRuntimeSetEventWhenCallbackReturns(ORT_CALLBACK_INSTANCE pci, ORT_EVENT finish_event) { if (finish_event == nullptr) return Status(onnxruntime::common::ONNXRUNTIME, 
onnxruntime::common::INVALID_ARGUMENT, ""); if (pci == nullptr) { if (pthread_mutex_lock(&finish_event->finish_event_mutex)) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "lock failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "lock failed"); } finish_event->finished = true; if (pthread_mutex_unlock(&finish_event->finish_event_mutex)) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "unlock failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "unlock failed"); if (!pthread_cond_broadcast(&finish_event->finish_event_data)) return Status::OK(); else - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "pthread_cond_broadcast failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "pthread_cond_broadcast failed"); } else { pci->AddEvent(finish_event); return Status::OK(); } } -void OnnxRuntimeCallbackInstance::AddEvent(ONNXRUNTIME_EVENT event) { +void OnnxRuntimeCallbackInstance::AddEvent(ORT_EVENT event) { events_to_signal_.push_back(event); } Status OnnxRuntimeCallbackInstance::SignalAllEvents() { - for (ONNXRUNTIME_EVENT finish_event : events_to_signal_) { + for (ORT_EVENT finish_event : events_to_signal_) { if (pthread_mutex_lock(&finish_event->finish_event_mutex)) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "lock failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "lock failed"); } finish_event->finished = true; if (pthread_mutex_unlock(&finish_event->finish_event_mutex)) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "unlock failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "unlock failed"); if (pthread_cond_broadcast(&finish_event->finish_event_data)) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "pthread_cond_broadcast failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "pthread_cond_broadcast failed"); } return Status::OK(); } -Status CreateOnnxRuntimeEvent(ONNXRUNTIME_EVENT* out) { +Status CreateOnnxRuntimeEvent(ORT_EVENT* out) { if (out == nullptr) return Status(onnxruntime::common::ONNXRUNTIME, onnxruntime::common::INVALID_ARGUMENT, ""); *out = new OnnxRuntimeEvent(); return Status::OK(); } -void ONNXRuntimeCloseEvent(ONNXRUNTIME_EVENT finish_event) { +void OrtCloseEvent(ORT_EVENT finish_event) { delete finish_event; } diff --git a/onnxruntime/test/onnx/sync_api_win.cc b/onnxruntime/test/onnx/sync_api_win.cc index 28f4e16a12a4b..b5ce8644709f0 100644 --- a/onnxruntime/test/onnx/sync_api_win.cc +++ b/onnxruntime/test/onnx/sync_api_win.cc @@ -6,7 +6,7 @@ using ::onnxruntime::common::Status; -Status CreateAndSubmitThreadpoolWork(ONNXRUNTIME_CALLBACK_FUNCTION callback, void* data, PThreadPool pool) { +Status CreateAndSubmitThreadpoolWork(ORT_CALLBACK_FUNCTION callback, void* data, PThreadPool pool) { PTP_WORK work = CreateThreadpoolWork(callback, data, pool); if (!work) { return Status(::onnxruntime::common::ONNXRUNTIME, ::onnxruntime::common::FAIL, "create thread pool task failed"); @@ -15,16 +15,16 @@ Status CreateAndSubmitThreadpoolWork(ONNXRUNTIME_CALLBACK_FUNCTION callback, voi return Status::OK(); } -Status WaitAndCloseEvent(ONNXRUNTIME_EVENT finish_event) { +Status WaitAndCloseEvent(ORT_EVENT finish_event) { DWORD dwWaitResult = WaitForSingleObject(finish_event, INFINITE); (void)CloseHandle(finish_event); if (dwWaitResult != WAIT_OBJECT_0) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "WaitForSingleObject failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "WaitForSingleObject failed"); } return Status::OK(); } -Status CreateOnnxRuntimeEvent(ONNXRUNTIME_EVENT* out) { +Status CreateOnnxRuntimeEvent(ORT_EVENT* out) { if (out == 
nullptr) return Status(::onnxruntime::common::ONNXRUNTIME, ::onnxruntime::common::INVALID_ARGUMENT, ""); HANDLE finish_event = CreateEvent( @@ -33,23 +33,23 @@ Status CreateOnnxRuntimeEvent(ONNXRUNTIME_EVENT* out) { FALSE, // initial state is nonsignaled NULL); if (finish_event == NULL) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "unable to create finish event"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "unable to create finish event"); } *out = finish_event; return Status::OK(); } -Status OnnxRuntimeSetEventWhenCallbackReturns(ONNXRUNTIME_CALLBACK_INSTANCE pci, ONNXRUNTIME_EVENT finish_event) { +Status OnnxRuntimeSetEventWhenCallbackReturns(ORT_CALLBACK_INSTANCE pci, ORT_EVENT finish_event) { if (finish_event == nullptr) return Status(::onnxruntime::common::ONNXRUNTIME, ::onnxruntime::common::INVALID_ARGUMENT, ""); if (pci) SetEventWhenCallbackReturns(pci, finish_event); else if (!SetEvent(finish_event)) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "SetEvent failed"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "SetEvent failed"); } return Status::OK(); } -void ONNXRuntimeCloseEvent(ONNXRUNTIME_EVENT finish_event) { +void OrtCloseEvent(ORT_EVENT finish_event) { (void)CloseHandle(finish_event); -} \ No newline at end of file +} diff --git a/onnxruntime/test/onnx/testenv.h b/onnxruntime/test/onnx/testenv.h index f77ac7badc30a..34b591f733b51 100644 --- a/onnxruntime/test/onnx/testenv.h +++ b/onnxruntime/test/onnx/testenv.h @@ -25,5 +25,5 @@ class TestEnv { ~TestEnv(); private: - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TestEnv); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TestEnv); }; diff --git a/onnxruntime/test/onnxruntime_exec/Model.h b/onnxruntime/test/onnxruntime_exec/Model.h index 917a6a2fbc26b..e59ff897994b2 100644 --- a/onnxruntime/test/onnxruntime_exec/Model.h +++ b/onnxruntime/test/onnxruntime_exec/Model.h @@ -19,7 +19,7 @@ enum class ExecutionStatus { MODEL_LOADING_FAILURE = 1, DATA_LOADING_FAILURE = 2, PREDICTION_FAILURE = 3, - ONNXRUNTIME_NOT_IMPLEMENTED = 5 + ORT_NOT_IMPLEMENTED = 5 }; class Model { @@ -33,7 +33,7 @@ class Model { struct stat s; if (stat(datafile.c_str(), &s) == 0) { if (s.st_mode & S_IFDIR) { - exec_status_ = ExecutionStatus::ONNXRUNTIME_NOT_IMPLEMENTED; + exec_status_ = ExecutionStatus::ORT_NOT_IMPLEMENTED; return; } } diff --git a/onnxruntime/test/onnxruntime_exec/Runtime.h b/onnxruntime/test/onnxruntime_exec/Runtime.h index 53301d8ca37b3..bbf1aed93479f 100644 --- a/onnxruntime/test/onnxruntime_exec/Runtime.h +++ b/onnxruntime/test/onnxruntime_exec/Runtime.h @@ -238,11 +238,11 @@ class WinMLRuntime { if (*type == "tensor(double)" || *type == "tensor(float)") { // If double is used in the following statement, following error occurs. 
// Tensor type mismatch, caller expects elements to be float while tensor contains double Error from operator - mlvalue = ReadTensor(TestCPUExecutionProvider().GetAllocator(0, ONNXRuntimeMemTypeDefault), inputs_reader, feature_size, shape, variable_batch_size); + mlvalue = ReadTensor(TestCPUExecutionProvider().GetAllocator(0, OrtMemTypeDefault), inputs_reader, feature_size, shape, variable_batch_size); } else if (*type == "tensor(int64)") - mlvalue = ReadTensor(TestCPUExecutionProvider().GetAllocator(0, ONNXRuntimeMemTypeDefault), inputs_reader, feature_size, shape, variable_batch_size); + mlvalue = ReadTensor(TestCPUExecutionProvider().GetAllocator(0, OrtMemTypeDefault), inputs_reader, feature_size, shape, variable_batch_size); else if (*type == "tensor(string)") - mlvalue = ReadTensorStrings(TestCPUExecutionProvider().GetAllocator(0, ONNXRuntimeMemTypeDefault), inputs_reader, feature_size, shape, variable_batch_size); + mlvalue = ReadTensorStrings(TestCPUExecutionProvider().GetAllocator(0, OrtMemTypeDefault), inputs_reader, feature_size, shape, variable_batch_size); else throw DataValidationException("Unsupported input type: " + std::string(*type)); diff --git a/onnxruntime/test/perftest/TestCase.cc b/onnxruntime/test/perftest/TestCase.cc index b47016d4978b7..3b50dbdf663bf 100644 --- a/onnxruntime/test/perftest/TestCase.cc +++ b/onnxruntime/test/perftest/TestCase.cc @@ -155,7 +155,7 @@ static int ExtractFileNo(const std::string& name) { const char* end = number_str.c_str(); long ret = strtol(start, const_cast(&end), 10); if (end == start) { - ONNXRUNTIME_THROW("parse file name failed"); + ORT_THROW("parse file name failed"); } return static_cast(ret); } @@ -172,7 +172,7 @@ static Status SortTensorFileNames(std::vector& input_pb_files) { for (size_t i = 0; i != input_pb_files.size(); ++i) { int fileno = ExtractFileNo(input_pb_files[i].filename().string()); if (fileno != i) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "illegal input file name:", input_pb_files[i].filename().string()); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "illegal input file name:", input_pb_files[i].filename().string()); } } return Status::OK(); @@ -240,9 +240,9 @@ Status LoopDataFile(int test_data_pb_fd, AllocatorPtr allocator, break; } } - if (!st.IsOK()) return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "load the ", item_id, "-th item failed,", st.ErrorMessage()); + if (!st.IsOK()) return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "load the ", item_id, "-th item failed,", st.ErrorMessage()); if (!clean_eof) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "parse input file failed, has extra unparsed data"); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "parse input file failed, has extra unparsed data"); } return Status::OK(); } @@ -314,7 +314,7 @@ class OnnxTestCase : public ITestCase { std::once_flag model_parsed_; Status ParseModel(); - ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OnnxTestCase); + ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OnnxTestCase); public: OnnxTestCase(const AllocatorPtr&, const std::string& test_case_name); @@ -388,10 +388,10 @@ static Status LoadTensors(const std::vector& pb_files, std::vectoremplace_back(tensor); } @@ -404,7 +404,7 @@ Status OnnxTestCase::LoadTestData(size_t id, onnxruntime::NameMLValMap& name_dat Status st = ParseModel(); if (!st.IsOK()) - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse model failed:", st.ErrorMessage()); + return ORT_MAKE_STATUS(ONNXRUNTIME, MODEL_LOADED, "parse model failed:", st.ErrorMessage()); path test_data_pb = 
test_data_dirs_[id] / (is_input ? "inputs.pb" : "outputs.pb"); int test_data_pb_fd; @@ -436,17 +436,17 @@ Status OnnxTestCase::LoadTestData(size_t id, onnxruntime::NameMLValMap& name_dat test_data_pb_files.push_back(f); } } - ONNXRUNTIME_RETURN_IF_ERROR(SortTensorFileNames(test_data_pb_files)); + ORT_RETURN_IF_ERROR(SortTensorFileNames(test_data_pb_files)); std::vector test_data_pbs; - ONNXRUNTIME_RETURN_IF_ERROR(LoadTensors(test_data_pb_files, &test_data_pbs)); - ONNXRUNTIME_RETURN_IF_ERROR(ConvertTestData(test_data_pbs, is_input ? input_value_info_ : output_value_info_, name_data_map)); + ORT_RETURN_IF_ERROR(LoadTensors(test_data_pb_files, &test_data_pbs)); + ORT_RETURN_IF_ERROR(ConvertTestData(test_data_pbs, is_input ? input_value_info_ : output_value_info_, name_data_map)); return Status::OK(); } Status OnnxTestCase::FromPbFiles(const std::vector& files, std::vector& output_values) { for (const path& f : files) { - if (!f.has_extension()) return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "unknown file type, path = ", f); + if (!f.has_extension()) return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "unknown file type, path = ", f); std::string s = f.extension().string(); if (s != ".pb") continue; @@ -461,7 +461,7 @@ Status OnnxTestCase::FromPbFiles(const std::vector& files, std::vector& test_ std::string name = var_names[input_index]; const onnx::TensorProto& input = test_data_pbs[input_index]; MLValue v1; - ONNXRUNTIME_RETURN_IF_ERROR(utils::TensorProtoToMLValue(input, allocator_, nullptr, 0, v1)); + ORT_RETURN_IF_ERROR(utils::TensorProtoToMLValue(input, allocator_, nullptr, 0, v1)); out.insert(std::make_pair(name, v1)); } return Status::OK(); diff --git a/onnxruntime/test/perftest/performance_runner.cc b/onnxruntime/test/perftest/performance_runner.cc index 0c6567b40604f..20162b09abb4b 100644 --- a/onnxruntime/test/perftest/performance_runner.cc +++ b/onnxruntime/test/perftest/performance_runner.cc @@ -19,7 +19,7 @@ namespace onnxruntime { namespace perftest { Status PerformanceRunner::Run() { if (!Initialize()) { - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "failed to initialize."); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "failed to initialize."); } // warm up @@ -31,13 +31,13 @@ Status PerformanceRunner::Run() { std::unique_ptr p_ICPUUsage = utils::CreateICPUUsage(); switch (performance_test_config_.run_config.test_mode) { case TestMode::kFixDurationMode: - ONNXRUNTIME_RETURN_IF_ERROR(RunFixDuration()); + ORT_RETURN_IF_ERROR(RunFixDuration()); break; case TestMode::KFixRepeatedTimesMode: - ONNXRUNTIME_RETURN_IF_ERROR(RunRepeatedTimes()); + ORT_RETURN_IF_ERROR(RunRepeatedTimes()); break; default: - return ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "unknown test mode."); + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "unknown test mode."); } performance_result_.average_CPU_usage = p_ICPUUsage->GetUsage(); performance_result_.peak_workingset_size = utils::GetPeakWorkingSetSize(); diff --git a/onnxruntime/test/perftest/performance_runner.h b/onnxruntime/test/perftest/performance_runner.h index 1143d59e5548d..d17ed720682f4 100644 --- a/onnxruntime/test/perftest/performance_runner.h +++ b/onnxruntime/test/perftest/performance_runner.h @@ -81,7 +81,7 @@ class PerformanceRunner { inline Status RunOneIteration(bool isWarmup = false) { auto start = std::chrono::high_resolution_clock::now(); - ONNXRUNTIME_RETURN_IF_ERROR(session_object_->Run(*io_binding_)); + ORT_RETURN_IF_ERROR(session_object_->Run(*io_binding_)); auto end = std::chrono::high_resolution_clock::now(); if 
(!isWarmup) { @@ -98,14 +98,14 @@ class PerformanceRunner { inline Status RunFixDuration() { while (performance_result_.total_time_cost < performance_test_config_.run_config.duration_in_seconds) { - ONNXRUNTIME_RETURN_IF_ERROR(RunOneIteration()); + ORT_RETURN_IF_ERROR(RunOneIteration()); } return Status::OK(); } inline Status RunRepeatedTimes() { for (size_t ite = 0; ite < performance_test_config_.run_config.repeated_times; ite++) { - ONNXRUNTIME_RETURN_IF_ERROR(RunOneIteration()); + ORT_RETURN_IF_ERROR(RunOneIteration()); } return Status::OK(); } diff --git a/onnxruntime/test/perftest/testenv.cc b/onnxruntime/test/perftest/testenv.cc index d83c22b6d3027..f661a1d7f8303 100644 --- a/onnxruntime/test/perftest/testenv.cc +++ b/onnxruntime/test/perftest/testenv.cc @@ -16,8 +16,8 @@ using namespace std::experimental::filesystem::v1; using onnxruntime::Status; -inline void RegisterExecutionProvider(onnxruntime::InferenceSession* sess, ONNXRuntimeProviderFactoryInterface** f) { - ONNXRuntimeProvider* p; +inline void RegisterExecutionProvider(onnxruntime::InferenceSession* sess, OrtProviderFactoryInterface** f) { + OrtProvider* p; (*f)->CreateProvider(f, &p); std::unique_ptr q((onnxruntime::IExecutionProvider*)p); auto status = sess->RegisterExecutionProvider(std::move(q)); @@ -26,7 +26,7 @@ inline void RegisterExecutionProvider(onnxruntime::InferenceSession* sess, ONNXR } } #define FACTORY_PTR_HOLDER \ - std::unique_ptr ptr_holder_(f, ONNXRuntimeReleaseObject); + std::unique_ptr ptr_holder_(f, OrtReleaseObject); Status SessionFactory::create(std::shared_ptr<::onnxruntime::InferenceSession>& sess, const path& model_url, const std::string& logid) const { ::onnxruntime::SessionOptions so; @@ -41,46 +41,46 @@ Status SessionFactory::create(std::shared_ptr<::onnxruntime::InferenceSession>& for (const std::string& provider : providers_) { if (provider == onnxruntime::kCudaExecutionProvider) { #ifdef USE_CUDA - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateCUDAExecutionProviderFactory(0, &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateCUDAExecutionProviderFactory(0, &f)); FACTORY_PTR_HOLDER; RegisterExecutionProvider(sess.get(), f); #else - ONNXRUNTIME_THROW("CUDA is not supported in this build"); + ORT_THROW("CUDA is not supported in this build"); #endif } else if (provider == onnxruntime::kMklDnnExecutionProvider) { #ifdef USE_MKLDNN - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateMkldnnExecutionProviderFactory(enable_cpu_mem_arena_ ? 1 : 0, &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateMkldnnExecutionProviderFactory(enable_cpu_mem_arena_ ? 
1 : 0, &f)); FACTORY_PTR_HOLDER; RegisterExecutionProvider(sess.get(), f); #else - ONNXRUNTIME_THROW("CUDA is not supported in this build"); + ORT_THROW("CUDA is not supported in this build"); #endif } else if (provider == onnxruntime::kNupharExecutionProvider) { #ifdef USE_NUPHAR - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateNupharExecutionProviderFactory(0, "", &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateNupharExecutionProviderFactory(0, "", &f)); RegisterExecutionProvider(sess.get(), f); FACTORY_PTR_HOLDER; #else - ONNXRUNTIME_THROW("CUDA is not supported in this build"); + ORT_THROW("CUDA is not supported in this build"); #endif } else if (provider == onnxruntime::kBrainSliceExecutionProvider) { #if USE_BRAINSLICE - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateBrainSliceExecutionProviderFactory(0, true, "testdata/firmwares/onnx_rnns/instructions.bin", "testdata/firmwares/onnx_rnns/data.bin", "testdata/firmwares/onnx_rnns/schema.bin", &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateBrainSliceExecutionProviderFactory(0, true, "testdata/firmwares/onnx_rnns/instructions.bin", "testdata/firmwares/onnx_rnns/data.bin", "testdata/firmwares/onnx_rnns/schema.bin", &f)); RegisterExecutionProvider(sess.get(), f); FACTORY_PTR_HOLDER; #else - ONNXRUNTIME_THROW("This executable was not built with BrainSlice"); + ORT_THROW("This executable was not built with BrainSlice"); #endif } //TODO: add more } status = sess->Load(model_url.string()); - ONNXRUNTIME_RETURN_IF_ERROR(status); + ORT_RETURN_IF_ERROR(status); LOGS_DEFAULT(INFO) << "successfully loaded model from " << model_url; status = sess->Initialize(); if (status.IsOK()) diff --git a/onnxruntime/test/platform/windows/stacktrace_test.cc b/onnxruntime/test/platform/windows/stacktrace_test.cc index c7da48daadc57..9859abffea920 100644 --- a/onnxruntime/test/platform/windows/stacktrace_test.cc +++ b/onnxruntime/test/platform/windows/stacktrace_test.cc @@ -33,7 +33,7 @@ TEST(StacktraceTests, BasicTests) { EXPECT_THAT(result[0], HasSubstr("Unknown symbol")); try { - ONNXRUNTIME_THROW("Testing"); + ORT_THROW("Testing"); } catch (const OnnxRuntimeException& ex) { auto msg = ex.what(); std::cout << msg; diff --git a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc index 4d7e918d3d944..ad13abc9368ff 100644 --- a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc +++ b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc @@ -438,7 +438,7 @@ TEST(Loop, InfiniteLoopTermination) { test.AddOutput("loop_var_0_final", {1}, {0.f}); - ONNXRuntimeRunOptions session_run_options; + OrtRunOptions session_run_options; session_run_options.run_tag = "Loop.InfiniteLoopTermination"; auto terminator = [&session_run_options]() { diff --git a/onnxruntime/test/providers/cpu/cpu_execution_provider_test.cc b/onnxruntime/test/providers/cpu/cpu_execution_provider_test.cc index 0f941a10454be..5d8626d8b4cbd 100644 --- a/onnxruntime/test/providers/cpu/cpu_execution_provider_test.cc +++ b/onnxruntime/test/providers/cpu/cpu_execution_provider_test.cc @@ -10,7 +10,7 @@ TEST(CPUExecutionProviderTest, MetadataTest) { CPUExecutionProviderInfo info; auto provider = std::make_unique(info); EXPECT_TRUE(provider != nullptr); - ASSERT_STREQ(provider->GetAllocator(0, ONNXRuntimeMemTypeDefault)->Info().name, CPU); + ASSERT_STREQ(provider->GetAllocator(0, OrtMemTypeDefault)->Info().name, CPU); } } 
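For reference, the rename exercised by the test above is purely mechanical: the allocator memory-type enum ONNXRuntimeMemTypeDefault becomes OrtMemTypeDefault and every call keeps its signature. A minimal sketch of a test using the renamed enum, assuming the same CPUExecutionProvider/CPUExecutionProviderInfo types and gtest setup already present in this file (illustrative only, not part of the patch):

TEST(CPUExecutionProviderTest, AllocatorMemTypeSketch) {
  CPUExecutionProviderInfo info;
  auto provider = std::make_unique<CPUExecutionProvider>(info);
  // ONNXRuntimeMemTypeDefault becomes OrtMemTypeDefault; the call shape is unchanged.
  auto cpu_allocator = provider->GetAllocator(0, OrtMemTypeDefault);
  EXPECT_TRUE(cpu_allocator != nullptr);
}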
// namespace test } // namespace onnxruntime diff --git a/onnxruntime/test/providers/cpu/ml/zipmap_test.cc b/onnxruntime/test/providers/cpu/ml/zipmap_test.cc index 8ce74d7a2aa8f..e2e8ced73e565 100644 --- a/onnxruntime/test/providers/cpu/ml/zipmap_test.cc +++ b/onnxruntime/test/providers/cpu/ml/zipmap_test.cc @@ -20,7 +20,7 @@ void TestHelper(const std::vector& classes, } else if (type == "int64_t") { test.AddAttribute("classlabels_int64s", classes); } else { - ONNXRUNTIME_THROW("Invalid type: ", type); + ORT_THROW("Invalid type: ", type); } int64_t batch_size = (input_dims.size() > 1) ? input_dims[0] : 1; diff --git a/onnxruntime/test/providers/cpu/nn/conv_op_test.cc b/onnxruntime/test/providers/cpu/nn/conv_op_test.cc index 5c4b1659bd794..e4003e179bea0 100644 --- a/onnxruntime/test/providers/cpu/nn/conv_op_test.cc +++ b/onnxruntime/test/providers/cpu/nn/conv_op_test.cc @@ -37,7 +37,7 @@ void TestConvOp(const ConvOpAttributes& attributes, } test.AddAttribute("strides", attributes.strides); - ONNXRUNTIME_ENFORCE(inputs.size() <= 3, "Our name array is only setup to handle 3 inputs"); + ORT_ENFORCE(inputs.size() <= 3, "Our name array is only setup to handle 3 inputs"); const char* szNames[] = {"X", "W", "B"}; for (size_t i = 0; i < inputs.size(); i++) { test.AddInput(szNames[i], input_shapes[i], inputs[i]); diff --git a/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc b/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc index f7813b10e3a87..1fd25268fdbc7 100644 --- a/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc +++ b/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc @@ -37,7 +37,7 @@ void TestConvTransposeOp(const ConvTransposeOpAttributes& attributes, test.AddAttribute("strides", attributes.strides); test.AddAttribute("group", attributes.group); - ONNXRUNTIME_ENFORCE(inputs.size() <= 3, "Our name array is only setup to handle 3 inputs"); + ORT_ENFORCE(inputs.size() <= 3, "Our name array is only setup to handle 3 inputs"); const char* szNames[] = {"X", "W", "B"}; for (size_t i = 0; i < inputs.size(); i++) { test.AddInput(szNames[i], input_shapes[i], inputs[i]); diff --git a/onnxruntime/test/providers/provider_test_utils.cc b/onnxruntime/test/providers/provider_test_utils.cc index a3a39a35dd9ac..98e368f0fdb8e 100644 --- a/onnxruntime/test/providers/provider_test_utils.cc +++ b/onnxruntime/test/providers/provider_test_utils.cc @@ -73,7 +73,7 @@ void CheckDispatch(MLDataType type, const OpTester::Data& expected_data, const T if (type == DataTypeImpl::GetType()) Check(expected_data, output_tensor, provider_type); else - ONNXRUNTIME_THROW("OpTester:Check() not implemented for output tensor type of ", type); + ORT_THROW("OpTester:Check() not implemented for output tensor type of ", type); } template @@ -85,10 +85,10 @@ void CheckDispatch(MLDataType type, const OpTester::Data& expected_data, const T } void Check(const OpTester::Data& expected_data, const Tensor& output_tensor, const std::string& provider_type) { - ONNXRUNTIME_ENFORCE(expected_data.data_.Get().Shape() == output_tensor.Shape(), - "Expected output shape [" + expected_data.data_.Get().Shape().ToString() + - "] did not match run output shape [" + - output_tensor.Shape().ToString() + "] for " + expected_data.def_.Name()); + ORT_ENFORCE(expected_data.data_.Get().Shape() == output_tensor.Shape(), + "Expected output shape [" + expected_data.data_.Get().Shape().ToString() + + "] did not match run output shape [" + + output_tensor.Shape().ToString() + "] for " + expected_data.def_.Name()); 
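// Sketch (not part of this patch): the macro renames applied above keep each
// macro's arguments and semantics; only the prefix changes:
//   ONNXRUNTIME_ENFORCE(cond, "msg")                   ->  ORT_ENFORCE(cond, "msg")
//   ONNXRUNTIME_THROW("msg")                           ->  ORT_THROW("msg")
//   ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, "msg")  ->  ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "msg")
//   ONNXRUNTIME_RETURN_IF_ERROR(status)                ->  ORT_RETURN_IF_ERROR(status)
// For example, as elsewhere in these tests:
//   ORT_RETURN_IF_ERROR(session_object_->Run(*io_binding_));
//   return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "unknown test mode.");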
CheckDispatch(output_tensor.DataType(), expected_data, output_tensor, provider_type); } @@ -105,7 +105,7 @@ void CheckDispatch(MLDataType type, const OpTester::Data& expected_data, MLValue if (type == DataTypeImpl::GetType()) Check(expected_data, mlvalue.Get(), provider_type); else - ONNXRUNTIME_THROW("OpTester:Check() not implemented for output tensor type of ", type); + ORT_THROW("OpTester:Check() not implemented for output tensor type of ", type); } template @@ -158,7 +158,7 @@ void OpTester::SetOutputAbsErr(const char* name, float v) { [name](Data& data) { return (data.def_.Name() == name); }); - ONNXRUNTIME_ENFORCE(it != output_data_.end()); + ORT_ENFORCE(it != output_data_.end()); it->absolute_error_ = optional(v); } @@ -169,7 +169,7 @@ void OpTester::SetOutputRelErr(const char* name, float v) { [name](Data& data) { return (data.def_.Name() == name); }); - ONNXRUNTIME_ENFORCE(it != output_data_.end()); + ORT_ENFORCE(it != output_data_.end()); it->relative_error_ = optional(v); } @@ -257,7 +257,7 @@ void OpTester::Run(ExpectResult expect_result, try { status = graph.Resolve(); } catch (const std::exception& ex) { - status = ONNXRUNTIME_MAKE_STATUS(ONNXRUNTIME, FAIL, ex.what()); + status = ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, ex.what()); } } else { status = graph.Resolve(); diff --git a/onnxruntime/test/providers/provider_test_utils.h b/onnxruntime/test/providers/provider_test_utils.h index 064971431e0d0..e9287c6cfeb19 100644 --- a/onnxruntime/test/providers/provider_test_utils.h +++ b/onnxruntime/test/providers/provider_test_utils.h @@ -31,7 +31,7 @@ class optional { optional() : has_value_(false) {} bool has_value() const { return has_value_; } const T& value() const { - ONNXRUNTIME_ENFORCE(has_value_); + ORT_ENFORCE(has_value_); return value_; } @@ -270,7 +270,7 @@ class OpTester { int64_t values_count, bool is_initializer = false) { try { TensorShape shape{dims}; - ONNXRUNTIME_ENFORCE(shape.Size() == values_count, values_count, + ORT_ENFORCE(shape.Size() == values_count, values_count, " input values doesn't match tensor size of ", shape.Size()); auto allocator = test::AllocatorManager::Instance().GetAllocator(CPU); diff --git a/onnxruntime/test/shared_lib/fns_candy_style_transfer.c b/onnxruntime/test/shared_lib/fns_candy_style_transfer.c index 2a8d13e105ebb..b26bb4df91857 100644 --- a/onnxruntime/test/shared_lib/fns_candy_style_transfer.c +++ b/onnxruntime/test/shared_lib/fns_candy_style_transfer.c @@ -6,15 +6,15 @@ #include #include -#define ONNXRUNTIME_ABORT_ON_ERROR(expr) \ - do { \ - ONNXStatus* onnx_status = (expr); \ - if (onnx_status != NULL) { \ - const char* msg = ONNXRuntimeGetErrorMessage(onnx_status); \ - fprintf(stderr, "%s\n", msg); \ - ReleaseONNXStatus(onnx_status); \ - abort(); \ - } \ +#define ORT_ABORT_ON_ERROR(expr) \ + do { \ + ONNXStatus* onnx_status = (expr); \ + if (onnx_status != NULL) { \ + const char* msg = OrtGetErrorMessage(onnx_status); \ + fprintf(stderr, "%s\n", msg); \ + ReleaseONNXStatus(onnx_status); \ + abort(); \ + } \ } while (0); /** @@ -95,21 +95,21 @@ static int read_png_file(const char* input_file, size_t* height, size_t* width, * \param tensor should be a float tensor in [N,C,H,W] format */ static int write_tensor_to_png_file(ONNXValue* tensor, const char* output_file) { - struct ONNXRuntimeTensorTypeAndShapeInfo* shape_info; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeGetTensorShapeAndType(tensor, &shape_info)); - size_t dim_count = ONNXRuntimeGetNumOfDimensions(shape_info); + struct OrtTensorTypeAndShapeInfo* shape_info; + 
ORT_ABORT_ON_ERROR(OrtGetTensorShapeAndType(tensor, &shape_info)); + size_t dim_count = OrtGetNumOfDimensions(shape_info); if (dim_count != 4) { printf("output tensor must have 4 dimensions"); return -1; } int64_t dims[4]; - ONNXRuntimeGetDimensions(shape_info, dims, sizeof(dims) / sizeof(dims[0])); + OrtGetDimensions(shape_info, dims, sizeof(dims) / sizeof(dims[0])); if (dims[0] != 1 || dims[1] != 3) { printf("output tensor shape error"); return -1; } float* f; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeGetTensorMutableData(tensor, (void**)&f)); + ORT_ABORT_ON_ERROR(OrtGetTensorMutableData(tensor, (void**)&f)); png_bytep model_output_bytes; png_image image; memset(&image, 0, (sizeof image)); @@ -145,23 +145,23 @@ int run_inference(ONNXSession* session, const char* input_file, const char* outp free(model_input); return -1; } - ONNXRuntimeAllocatorInfo* allocator_info; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeCreateCpuAllocatorInfo(ONNXRuntimeArenaAllocator, ONNXRuntimeMemTypeDefault, &allocator_info)); + OrtAllocatorInfo* allocator_info; + ORT_ABORT_ON_ERROR(OrtCreateCpuAllocatorInfo(OrtArenaAllocator, OrtMemTypeDefault, &allocator_info)); const size_t input_shape[] = {1, 3, 720, 720}; const size_t input_shape_len = sizeof(input_shape) / sizeof(input_shape[0]); const size_t model_input_len = model_input_ele_count * sizeof(float); ONNXValue* input_tensor = NULL; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeCreateTensorWithDataAsONNXValue(allocator_info, model_input, model_input_len, input_shape, input_shape_len, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, &input_tensor)); + ORT_ABORT_ON_ERROR(OrtCreateTensorWithDataAsONNXValue(allocator_info, model_input, model_input_len, input_shape, input_shape_len, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, &input_tensor)); assert(input_tensor != NULL); - assert(ONNXRuntimeIsTensor(input_tensor) != 0); - ReleaseONNXRuntimeAllocatorInfo(allocator_info); + assert(OrtIsTensor(input_tensor) != 0); + ReleaseOrtAllocatorInfo(allocator_info); const char* input_names[] = {"inputImage"}; const char* output_names[] = {"outputImage"}; ONNXValue* output_tensor = NULL; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeRunInference(session, NULL, input_names, (const ONNXValue* const*)&input_tensor, 1, output_names, 1, &output_tensor)); + ORT_ABORT_ON_ERROR(OrtRunInference(session, NULL, input_names, (const ONNXValue* const*)&input_tensor, 1, output_names, 1, &output_tensor)); assert(output_tensor != NULL); - assert(ONNXRuntimeIsTensor(output_tensor) != 0); + assert(OrtIsTensor(output_tensor) != 0); int ret = 0; if (write_tensor_to_png_file(output_tensor, output_file) != 0) { ret = -1; @@ -174,18 +174,18 @@ int run_inference(ONNXSession* session, const char* input_file, const char* outp void verify_input_output_count(ONNXSession* session) { size_t count; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeInferenceSessionGetInputCount(session, &count)); + ORT_ABORT_ON_ERROR(OrtInferenceSessionGetInputCount(session, &count)); assert(count == 1); - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeInferenceSessionGetOutputCount(session, &count)); + ORT_ABORT_ON_ERROR(OrtInferenceSessionGetOutputCount(session, &count)); assert(count == 1); } #ifdef USE_CUDA -void enable_cuda(ONNXRuntimeSessionOptions* session_option) { - ONNXRuntimeProviderFactoryInterface** factory; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeCreateCUDAExecutionProviderFactory(0, &factory)); - ONNXRuntimeSessionOptionsAppendExecutionProvider(session_option, factory); - ONNXRuntimeReleaseObject(factory); +void enable_cuda(OrtSessionOptions* session_option) { + 
OrtProviderFactoryInterface** factory; + ORT_ABORT_ON_ERROR(OrtCreateCUDAExecutionProviderFactory(0, &factory)); + OrtSessionOptionsAppendExecutionProvider(session_option, factory); + OrtReleaseObject(factory); } #endif @@ -197,19 +197,19 @@ int main(int argc, char* argv[]) { char* model_path = argv[1]; char* input_file = argv[2]; char* output_file = argv[3]; - ONNXRuntimeEnv* env; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeInitialize(ONNXRUNTIME_LOGGING_LEVEL_kWARNING, "test", &env)); - ONNXRuntimeSessionOptions* session_option = ONNXRuntimeCreateSessionOptions(); + OrtEnv* env; + ORT_ABORT_ON_ERROR(OrtInitialize(ORT_LOGGING_LEVEL_kWARNING, "test", &env)); + OrtSessionOptions* session_option = OrtCreateSessionOptions(); #ifdef USE_CUDA enable_cuda(session_option); #endif ONNXSession* session; - ONNXRUNTIME_ABORT_ON_ERROR(ONNXRuntimeCreateInferenceSession(env, model_path, session_option, &session)); + ORT_ABORT_ON_ERROR(OrtCreateInferenceSession(env, model_path, session_option, &session)); verify_input_output_count(session); int ret = run_inference(session, input_file, output_file); - ONNXRuntimeReleaseObject(session_option); + OrtReleaseObject(session_option); ReleaseONNXSession(session); - ONNXRuntimeReleaseObject(env); + OrtReleaseObject(env); if (ret != 0) { fprintf(stderr, "fail\n"); } diff --git a/onnxruntime/test/shared_lib/test_allocator.cc b/onnxruntime/test/shared_lib/test_allocator.cc index b7d583270e28e..ef4921d5996a9 100644 --- a/onnxruntime/test/shared_lib/test_allocator.cc +++ b/onnxruntime/test/shared_lib/test_allocator.cc @@ -9,26 +9,26 @@ using namespace onnxruntime; TEST_F(CApiTest, allocation_info) { - ONNXRuntimeAllocatorInfo *info1, *info2; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateAllocatorInfo("Cpu", ONNXRuntimeArenaAllocator, 0, ONNXRuntimeMemTypeDefault, &info1)); - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateCpuAllocatorInfo(ONNXRuntimeArenaAllocator, ONNXRuntimeMemTypeDefault, &info2)); - ASSERT_EQ(0, ONNXRuntimeCompareAllocatorInfo(info1, info2)); - ReleaseONNXRuntimeAllocatorInfo(info1); - ReleaseONNXRuntimeAllocatorInfo(info2); + OrtAllocatorInfo *info1, *info2; + ORT_THROW_ON_ERROR(OrtCreateAllocatorInfo("Cpu", OrtArenaAllocator, 0, OrtMemTypeDefault, &info1)); + ORT_THROW_ON_ERROR(OrtCreateCpuAllocatorInfo(OrtArenaAllocator, OrtMemTypeDefault, &info2)); + ASSERT_EQ(0, OrtCompareAllocatorInfo(info1, info2)); + ReleaseOrtAllocatorInfo(info1); + ReleaseOrtAllocatorInfo(info2); } TEST_F(CApiTest, DefaultAllocator) { - std::unique_ptr default_allocator; + std::unique_ptr default_allocator; { - ONNXRuntimeAllocator* ptr; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateDefaultAllocator(&ptr)); + OrtAllocator* ptr; + ORT_THROW_ON_ERROR(OrtCreateDefaultAllocator(&ptr)); default_allocator.reset(ptr); } - char* p = (char*)ONNXRuntimeAllocatorAlloc(default_allocator.get(), 100); + char* p = (char*)OrtAllocatorAlloc(default_allocator.get(), 100); ASSERT_NE(p, nullptr); memset(p, 0, 100); - ONNXRuntimeAllocatorFree(default_allocator.get(), p); - const ONNXRuntimeAllocatorInfo* info1 = ONNXRuntimeAllocatorGetInfo(default_allocator.get()); - const ONNXRuntimeAllocatorInfo* info2 = (*default_allocator)->Info(default_allocator.get()); - ASSERT_EQ(0, ONNXRuntimeCompareAllocatorInfo(info1, info2)); + OrtAllocatorFree(default_allocator.get(), p); + const OrtAllocatorInfo* info1 = OrtAllocatorGetInfo(default_allocator.get()); + const OrtAllocatorInfo* info2 = (*default_allocator)->Info(default_allocator.get()); + ASSERT_EQ(0, OrtCompareAllocatorInfo(info1, info2)); } diff --git 
a/onnxruntime/test/shared_lib/test_fixture.h b/onnxruntime/test/shared_lib/test_fixture.h index 7c5ac1fd8ee4e..d510240fc3bf5 100644 --- a/onnxruntime/test/shared_lib/test_fixture.h +++ b/onnxruntime/test/shared_lib/test_fixture.h @@ -13,23 +13,23 @@ typedef const char* PATH_TYPE; #endif //empty -static inline void ONNXRUNTIME_API_CALL MyLoggingFunction(void*, ONNXRuntimeLoggingLevel, const char*, const char*, const char*, const char*) { +static inline void ORT_API_CALL MyLoggingFunction(void*, OrtLoggingLevel, const char*, const char*, const char*, const char*) { } template class CApiTestImpl : public ::testing::Test { protected: - ONNXRuntimeEnv* env = nullptr; + OrtEnv* env = nullptr; void SetUp() override { if (use_customer_logger) { - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeInitializeWithCustomLogger(MyLoggingFunction, nullptr, ONNXRUNTIME_LOGGING_LEVEL_kINFO, "Default", &env)); + ORT_THROW_ON_ERROR(OrtInitializeWithCustomLogger(MyLoggingFunction, nullptr, ORT_LOGGING_LEVEL_kINFO, "Default", &env)); } else { - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeInitialize(ONNXRUNTIME_LOGGING_LEVEL_kINFO, "Default", &env)); + ORT_THROW_ON_ERROR(OrtInitialize(ORT_LOGGING_LEVEL_kINFO, "Default", &env)); } } void TearDown() override { - if (env) ONNXRuntimeReleaseObject(env); + if (env) OrtReleaseObject(env); } // Objects declared here can be used by all tests in the test case for Foo. diff --git a/onnxruntime/test/shared_lib/test_inference.cc b/onnxruntime/test/shared_lib/test_inference.cc index 3b736f35ec1df..b5d3c10fb9791 100644 --- a/onnxruntime/test/shared_lib/test_inference.cc +++ b/onnxruntime/test/shared_lib/test_inference.cc @@ -13,32 +13,32 @@ using namespace onnxruntime; -void RunSession(ONNXRuntimeAllocator* env, ONNXSession* session_object, +void RunSession(OrtAllocator* env, ONNXSession* session_object, const std::vector& dims_x, const std::vector& values_x, const std::vector& dims_y, const std::vector& values_y) { std::unique_ptr value_x(nullptr, ReleaseONNXValue); std::vector inputs(1); - inputs[0] = ONNXRuntimeCreateTensorAsONNXValue(env, dims_x, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT); + inputs[0] = OrtCreateTensorAsONNXValue(env, dims_x, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT); value_x.reset(inputs[0]); void* raw_data; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorMutableData(inputs[0], &raw_data)); + ORT_THROW_ON_ERROR(OrtGetTensorMutableData(inputs[0], &raw_data)); memcpy(raw_data, values_x.data(), values_x.size() * sizeof(values_x[0])); std::vector input_names{"X"}; ONNXValue* output_tensor = nullptr; const char* output_names[] = {"Y"}; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeRunInference(session_object, NULL, input_names.data(), inputs.data(), inputs.size(), output_names, 1, &output_tensor)); + ORT_THROW_ON_ERROR(OrtRunInference(session_object, NULL, input_names.data(), inputs.data(), inputs.size(), output_names, 1, &output_tensor)); ASSERT_NE(output_tensor, nullptr); - std::unique_ptr shape_info; + std::unique_ptr shape_info; { - ONNXRuntimeTensorTypeAndShapeInfo* shape_info_ptr; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorShapeAndType(output_tensor, &shape_info_ptr)); + OrtTensorTypeAndShapeInfo* shape_info_ptr; + ORT_THROW_ON_ERROR(OrtGetTensorShapeAndType(output_tensor, &shape_info_ptr)); shape_info.reset(shape_info_ptr); } - size_t rtensor_dims = ONNXRuntimeGetNumOfDimensions(shape_info.get()); + size_t rtensor_dims = OrtGetNumOfDimensions(shape_info.get()); std::vector shape_array(rtensor_dims); - ONNXRuntimeGetDimensions(shape_info.get(), shape_array.data(), 
shape_array.size()); + OrtGetDimensions(shape_info.get(), shape_array.data(), shape_array.size()); ASSERT_EQ(shape_array, dims_y); size_t total_len = 1; for (size_t i = 0; i != rtensor_dims; ++i) { @@ -46,7 +46,7 @@ void RunSession(ONNXRuntimeAllocator* env, ONNXSession* session_object, } ASSERT_EQ(values_y.size(), total_len); float* f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorMutableData(output_tensor, (void**)&f)); + ORT_THROW_ON_ERROR(OrtGetTensorMutableData(output_tensor, (void**)&f)); for (size_t i = 0; i != total_len; ++i) { ASSERT_EQ(values_y[i], f[i]); } @@ -54,7 +54,7 @@ void RunSession(ONNXRuntimeAllocator* env, ONNXSession* session_object, } template -void TestInference(ONNXRuntimeEnv* env, T model_uri, +void TestInference(OrtEnv* env, T model_uri, const std::vector& dims_x, const std::vector& values_x, const std::vector& expected_dims_y, @@ -64,30 +64,30 @@ void TestInference(ONNXRuntimeEnv* env, T model_uri, if (provider_type == 1) { #ifdef USE_CUDA - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateCUDAExecutionProviderFactory(0, &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateCUDAExecutionProviderFactory(0, &f)); sf.AppendExecutionProvider(f); - ONNXRuntimeReleaseObject(f); + OrtReleaseObject(f); std::cout << "Running simple inference with cuda provider" << std::endl; #else return; #endif } else if (provider_type == 2) { #ifdef USE_MKLDNN - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateMkldnnExecutionProviderFactory(1, &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateMkldnnExecutionProviderFactory(1, &f)); sf.AppendExecutionProvider(f); - ONNXRuntimeReleaseObject(f); + OrtReleaseObject(f); std::cout << "Running simple inference with mkldnn provider" << std::endl; #else return; #endif } else if (provider_type == 3) { #ifdef USE_NUPHAR - ONNXRuntimeProviderFactoryInterface** f; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateNupharExecutionProviderFactory(0, "", &f)); + OrtProviderFactoryInterface** f; + ORT_THROW_ON_ERROR(OrtCreateNupharExecutionProviderFactory(0, "", &f)); sf.AppendExecutionProvider(f); - ONNXRuntimeReleaseObject(f); + OrtReleaseObject(f); std::cout << "Running simple inference with nuphar provider" << std::endl; #else return; @@ -98,8 +98,8 @@ void TestInference(ONNXRuntimeEnv* env, T model_uri, if (custom_op) { sf.AddCustomOp("libonnxruntime_custom_op_shared_lib_test.so"); } - std::unique_ptr inference_session(sf.ONNXRuntimeCreateInferenceSession(model_uri), ReleaseONNXSession); - std::unique_ptr default_allocator(MockedONNXRuntimeAllocator::Create()); + std::unique_ptr inference_session(sf.OrtCreateInferenceSession(model_uri), ReleaseONNXSession); + std::unique_ptr default_allocator(MockedOrtAllocator::Create()); // Now run RunSession(default_allocator.get(), inference_session.get(), dims_x, values_x, expected_dims_y, expected_values_y); } @@ -144,11 +144,11 @@ TEST_F(CApiTest, DISABLED_custom_op) { } #endif -#ifdef ONNXRUNTIME_RUN_EXTERNAL_ONNX_TESTS +#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS TEST_F(CApiTest, create_session_without_session_option) { constexpr PATH_TYPE model_uri = TSTR("../models/opset8/test_squeezenet/model.onnx"); ONNXSession* ret; - ONNXRUNTIME_THROW_ON_ERROR(::ONNXRuntimeCreateInferenceSession(env, model_uri, nullptr, &ret)); + ORT_THROW_ON_ERROR(::OrtCreateInferenceSession(env, model_uri, nullptr, &ret)); ASSERT_NE(nullptr, ret); ReleaseONNXSession(ret); } @@ -156,47 +156,47 @@ TEST_F(CApiTest, 
create_session_without_session_option) { TEST_F(CApiTest, create_tensor) { const char* s[] = {"abc", "kmp"}; size_t expected_len = 2; - std::unique_ptr default_allocator(MockedONNXRuntimeAllocator::Create()); + std::unique_ptr default_allocator(MockedOrtAllocator::Create()); { std::unique_ptr tensor( - ONNXRuntimeCreateTensorAsONNXValue(default_allocator.get(), {expected_len}, ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING), ReleaseONNXValue); - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeFillStringTensor(tensor.get(), s, expected_len)); - std::unique_ptr shape_info; + OrtCreateTensorAsONNXValue(default_allocator.get(), {expected_len}, ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING), ReleaseONNXValue); + ORT_THROW_ON_ERROR(OrtFillStringTensor(tensor.get(), s, expected_len)); + std::unique_ptr shape_info; { - ONNXRuntimeTensorTypeAndShapeInfo* shape_info_ptr; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorShapeAndType(tensor.get(), &shape_info_ptr)); + OrtTensorTypeAndShapeInfo* shape_info_ptr; + ORT_THROW_ON_ERROR(OrtGetTensorShapeAndType(tensor.get(), &shape_info_ptr)); shape_info.reset(shape_info_ptr); } - size_t len = static_cast(ONNXRuntimeGetTensorShapeElementCount(shape_info.get())); + size_t len = static_cast(OrtGetTensorShapeElementCount(shape_info.get())); ASSERT_EQ(len, expected_len); std::vector shape_array(len); size_t data_len; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetStringTensorDataLength(tensor.get(), &data_len)); + ORT_THROW_ON_ERROR(OrtGetStringTensorDataLength(tensor.get(), &data_len)); std::string result(data_len, '\0'); std::vector offsets(len); - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetStringTensorContent(tensor.get(), (void*)result.data(), data_len, offsets.data(), offsets.size())); + ORT_THROW_ON_ERROR(OrtGetStringTensorContent(tensor.get(), (void*)result.data(), data_len, offsets.data(), offsets.size())); } } TEST_F(CApiTest, create_tensor_with_data) { float values[] = {3.0f, 1.0f, 2.f, 0.f}; constexpr size_t values_length = sizeof(values) / sizeof(values[0]); - ONNXRuntimeAllocatorInfo* info; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateAllocatorInfo("Cpu", ONNXRuntimeDeviceAllocator, 0, ONNXRuntimeMemTypeDefault, &info)); + OrtAllocatorInfo* info; + ORT_THROW_ON_ERROR(OrtCreateAllocatorInfo("Cpu", OrtDeviceAllocator, 0, OrtMemTypeDefault, &info)); std::vector dims = {4}; std::unique_ptr tensor( - ONNXRuntimeCreateTensorWithDataAsONNXValue(info, values, values_length * sizeof(float), dims, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT), ReleaseONNXValue); - ReleaseONNXRuntimeAllocatorInfo(info); + OrtCreateTensorWithDataAsONNXValue(info, values, values_length * sizeof(float), dims, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT), ReleaseONNXValue); + ReleaseOrtAllocatorInfo(info); void* new_pointer; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorMutableData(tensor.get(), &new_pointer)); + ORT_THROW_ON_ERROR(OrtGetTensorMutableData(tensor.get(), &new_pointer)); ASSERT_EQ(new_pointer, values); - struct ONNXRuntimeTypeInfo* type_info; - ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTypeInfo(tensor.get(), &type_info)); - const struct ONNXRuntimeTensorTypeAndShapeInfo* tensor_info = ONNXRuntimeCastTypeInfoToTensorInfo(type_info); + struct OrtTypeInfo* type_info; + ORT_THROW_ON_ERROR(OrtGetTypeInfo(tensor.get(), &type_info)); + const struct OrtTensorTypeAndShapeInfo* tensor_info = OrtCastTypeInfoToTensorInfo(type_info); ASSERT_NE(tensor_info, nullptr); - ASSERT_EQ(1, ONNXRuntimeGetNumOfDimensions(tensor_info)); - ONNXRuntimeReleaseObject(type_info); + ASSERT_EQ(1, OrtGetNumOfDimensions(tensor_info)); + 
diff --git a/onnxruntime/test/shared_lib/test_io_types.cc b/onnxruntime/test/shared_lib/test_io_types.cc
index 1b815a81f29fb..60caf4f18c4c1 100644
--- a/onnxruntime/test/shared_lib/test_io_types.cc
+++ b/onnxruntime/test/shared_lib/test_io_types.cc
@@ -9,37 +9,37 @@ using namespace onnxruntime;
 static void TestModelInfo(const ONNXSession* inference_session, bool is_input, const std::vector& dims) {
   size_t input_count;
   if (is_input) {
-    ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeInferenceSessionGetInputCount(inference_session, &input_count));
+    ORT_THROW_ON_ERROR(OrtInferenceSessionGetInputCount(inference_session, &input_count));
   } else {
-    ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeInferenceSessionGetOutputCount(inference_session, &input_count));
+    ORT_THROW_ON_ERROR(OrtInferenceSessionGetOutputCount(inference_session, &input_count));
   }
   ASSERT_EQ(1, input_count);
-  std::unique_ptr input_type_info;
+  std::unique_ptr input_type_info;
   {
-    ONNXRuntimeTypeInfo* t;
+    OrtTypeInfo* t;
     if (is_input) {
-      ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeInferenceSessionGetInputTypeInfo(inference_session, 0, &t));
+      ORT_THROW_ON_ERROR(OrtInferenceSessionGetInputTypeInfo(inference_session, 0, &t));
     } else {
-      ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeInferenceSessionGetOutputTypeInfo(inference_session, 0, &t));
+      ORT_THROW_ON_ERROR(OrtInferenceSessionGetOutputTypeInfo(inference_session, 0, &t));
     }
     input_type_info.reset(t);
   }
   ASSERT_NE(nullptr, input_type_info);
-  const ONNXRuntimeTensorTypeAndShapeInfo* p = ONNXRuntimeCastTypeInfoToTensorInfo(input_type_info.get());
+  const OrtTensorTypeAndShapeInfo* p = OrtCastTypeInfoToTensorInfo(input_type_info.get());
   ASSERT_NE(nullptr, p);
-  enum OnnxRuntimeTensorElementDataType ele_type = ONNXRuntimeGetTensorElementType(p);
+  enum OrtTensorElementDataType ele_type = OrtGetTensorElementType(p);
   ASSERT_EQ(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, ele_type);
-  ASSERT_EQ(dims.size(), ONNXRuntimeGetNumOfDimensions(p));
+  ASSERT_EQ(dims.size(), OrtGetNumOfDimensions(p));
   std::vector real_dims(dims.size());
-  ONNXRuntimeGetDimensions(p, real_dims.data(), real_dims.size());
+  OrtGetDimensions(p, real_dims.data(), real_dims.size());
   ASSERT_EQ(real_dims, dims);
 }
 TEST_F(CApiTest, input_output_type_info) {
   SessionOptionsWrapper sf(env);
   constexpr PATH_TYPE model_uri = TSTR("../models/opset8/test_squeezenet/model.onnx");
-  std::unique_ptr inference_session(sf.ONNXRuntimeCreateInferenceSession(model_uri), ReleaseONNXSession);
+  std::unique_ptr inference_session(sf.OrtCreateInferenceSession(model_uri), ReleaseONNXSession);
   TestModelInfo(inference_session.get(), true, {1, 3, 224, 224});
   TestModelInfo(inference_session.get(), false, {1, 1000, 1, 1});
 }
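TestModelInfo above is essentially the pattern any caller follows to discover a model's input or output shape under the new names. A condensed sketch follows; it is illustration only and not part of the patch, and int64_t for the dimension type is an assumption, since the vector's element type was lost in this view of the diff.

#include <iostream>
#include <vector>
#include "core/session/onnxruntime_cxx_api.h"

void PrintFirstInputShape(const ONNXSession* session) {
  size_t input_count = 0;
  ORT_THROW_ON_ERROR(OrtInferenceSessionGetInputCount(session, &input_count));
  if (input_count == 0) return;

  OrtTypeInfo* type_info;
  ORT_THROW_ON_ERROR(OrtInferenceSessionGetInputTypeInfo(session, 0, &type_info));

  // The cast yields a non-owning view, as in TestModelInfo above.
  const OrtTensorTypeAndShapeInfo* tensor_info = OrtCastTypeInfoToTensorInfo(type_info);
  std::vector<int64_t> dims(OrtGetNumOfDimensions(tensor_info));  // int64_t is an assumption
  OrtGetDimensions(tensor_info, dims.data(), dims.size());

  for (auto d : dims) std::cout << d << " ";
  std::cout << std::endl;

  OrtReleaseObject(type_info);  // release the owning OrtTypeInfo, as create_tensor_with_data does above
}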
diff --git a/onnxruntime/test/shared_lib/test_run_options.cc b/onnxruntime/test/shared_lib/test_run_options.cc
index 0c9e5cb5661a8..18cf2c369d3c8 100644
--- a/onnxruntime/test/shared_lib/test_run_options.cc
+++ b/onnxruntime/test/shared_lib/test_run_options.cc
@@ -6,10 +6,10 @@ using namespace onnxruntime;
 TEST_F(CApiTest, run_options) {
-  std::unique_ptr options(ONNXRuntimeCreateRunOptions());
+  std::unique_ptr options(OrtCreateRunOptions());
   ASSERT_NE(options, nullptr);
-  ASSERT_EQ(ONNXRuntimeRunOptionsSetRunLogVerbosityLevel(options.get(), 1), nullptr);
-  ASSERT_EQ(ONNXRuntimeRunOptionsSetRunTag(options.get(), "abc"), nullptr);
-  ASSERT_STREQ(ONNXRuntimeRunOptionsGetRunTag(options.get()), "abc");
-  ASSERT_EQ(ONNXRuntimeRunOptionsGetRunLogVerbosityLevel(options.get()), (unsigned)1);
+  ASSERT_EQ(OrtRunOptionsSetRunLogVerbosityLevel(options.get(), 1), nullptr);
+  ASSERT_EQ(OrtRunOptionsSetRunTag(options.get(), "abc"), nullptr);
+  ASSERT_STREQ(OrtRunOptionsGetRunTag(options.get()), "abc");
+  ASSERT_EQ(OrtRunOptionsGetRunLogVerbosityLevel(options.get()), (unsigned)1);
 }
diff --git a/onnxruntime/test/shared_lib/test_session_options.cc b/onnxruntime/test/shared_lib/test_session_options.cc
index 1cb8c01147812..8f03acd8b4806 100644
--- a/onnxruntime/test/shared_lib/test_session_options.cc
+++ b/onnxruntime/test/shared_lib/test_session_options.cc
@@ -7,6 +7,6 @@ using namespace onnxruntime;
 TEST_F(CApiTest, session_options) {
-  std::unique_ptr options(ONNXRuntimeCreateSessionOptions());
+  std::unique_ptr options(OrtCreateSessionOptions());
   ASSERT_NE(options, nullptr);
 }
diff --git a/onnxruntime/test/tvm/tvm_basic_test.cc b/onnxruntime/test/tvm/tvm_basic_test.cc
index 793fc9262953c..1d046d62a24cb 100644
--- a/onnxruntime/test/tvm/tvm_basic_test.cc
+++ b/onnxruntime/test/tvm/tvm_basic_test.cc
@@ -157,7 +157,7 @@ static void RunSession(InferenceSession& session_object,
                        std::vector& values_y) {
   // prepare inputs
   MLValue ml_value;
-  CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, ONNXRuntimeMemTypeDefault), dims_x, values_x, &ml_value);
+  CreateMLValue(TestCPUExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_x, values_x, &ml_value);
   NameMLValMap feeds;
   feeds.insert(std::make_pair("X1", ml_value));
diff --git a/onnxruntime/test/util/compare_mlvalue.cc b/onnxruntime/test/util/compare_mlvalue.cc
index 9db76014d30a1..e4604c1b0e51b 100644
--- a/onnxruntime/test/util/compare_mlvalue.cc
+++ b/onnxruntime/test/util/compare_mlvalue.cc
@@ -18,7 +18,7 @@ using namespace onnxruntime;
 namespace {
-OnnxRuntimeTensorElementDataType CApiElementTypeFromProto(ONNX_NAMESPACE::TensorProto_DataType type) {
+OrtTensorElementDataType CApiElementTypeFromProto(ONNX_NAMESPACE::TensorProto_DataType type) {
   switch (type) {
     CASE_TYPE(FLOAT)
     CASE_TYPE(UINT8)
@@ -316,7 +316,7 @@ std::pair CompareMLValue(const MLValue& o, const ML
 std::pair VerifyValueInfo(const ONNX_NAMESPACE::ValueInfoProto& v, const ONNXValue* o) {
   if (!v.has_type()) return std::make_pair(COMPARE_RESULT::SUCCESS, "");
   if (v.type().has_tensor_type()) {
-    if (ONNXRuntimeIsTensor(o) == 0) {
+    if (OrtIsTensor(o) == 0) {
       return std::make_pair(COMPARE_RESULT::TYPE_MISMATCH, "");
     }
@@ -325,14 +325,14 @@ std::pair VerifyValueInfo(const ONNX_NAMESPACE::Val
     //if (((TensorTypeBase*)o.Type())->GetElementType() != DataTypeImpl::ElementTypeFromProto(t.elem_type())) {
     //  return COMPARE_RESULT::TYPE_MISMATCH;
     //}
-    std::unique_ptr info;
+    std::unique_ptr info;
     {
-      ONNXRuntimeTensorTypeAndShapeInfo* t1;
-      ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorShapeAndType(o, &t1));
+      OrtTensorTypeAndShapeInfo* t1;
+      ORT_THROW_ON_ERROR(OrtGetTensorShapeAndType(o, &t1));
       info.reset(t1);
     }
-    OnnxRuntimeTensorElementDataType real_type = ONNXRuntimeGetTensorElementType(info.get());
-    OnnxRuntimeTensorElementDataType expected_type = CApiElementTypeFromProto(t.elem_type());
+    OrtTensorElementDataType real_type = OrtGetTensorElementType(info.get());
+    OrtTensorElementDataType expected_type = CApiElementTypeFromProto(t.elem_type());
     if (real_type != expected_type) {
       return std::make_pair(COMPARE_RESULT::TYPE_MISMATCH, "");
     }
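VerifyValueInfo above shows the renamed type-inspection calls working together: OrtIsTensor rejects non-tensors, then OrtGetTensorShapeAndType and OrtGetTensorElementType compare the runtime element type against the model's ValueInfoProto. The same check in isolation looks roughly like the sketch below; it is illustration only and not part of the patch, and because the deleter the tests use for OrtTensorTypeAndShapeInfo is not visible in this view, cleanup is deliberately omitted.

#include "core/session/onnxruntime_cxx_api.h"

// Returns true when `value` is a tensor whose elements are float.
bool IsFloatTensor(const ONNXValue* value) {
  if (OrtIsTensor(value) == 0) return false;  // not a tensor at all

  OrtTensorTypeAndShapeInfo* info;
  ORT_THROW_ON_ERROR(OrtGetTensorShapeAndType(value, &info));
  OrtTensorElementDataType type = OrtGetTensorElementType(info);
  // NOTE: releasing `info` is omitted here; see the unique_ptr usage above for how the tests manage it.
  return type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT;
}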
diff --git a/onnxruntime/test/util/default_providers.cc b/onnxruntime/test/util/default_providers.cc
index d7dad6a82c722..6d386c648bdb7 100644
--- a/onnxruntime/test/util/default_providers.cc
+++ b/onnxruntime/test/util/default_providers.cc
@@ -5,26 +5,26 @@
 #include "providers.h"
 #include "core/session/onnxruntime_cxx_api.h"
 #define FACTORY_PTR_HOLDER \
-  std::unique_ptr ptr_holder_(f);
+  std::unique_ptr ptr_holder_(f);
 namespace onnxruntime {
 namespace test {
 std::unique_ptr DefaultCpuExecutionProvider(bool enable_arena) {
-  ONNXRuntimeProviderFactoryInterface** f;
-  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateCpuExecutionProviderFactory(enable_arena ? 1 : 0, &f));
+  OrtProviderFactoryInterface** f;
+  ORT_THROW_ON_ERROR(OrtCreateCpuExecutionProviderFactory(enable_arena ? 1 : 0, &f));
   FACTORY_PTR_HOLDER;
-  ONNXRuntimeProvider* out;
-  ONNXRUNTIME_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
+  OrtProvider* out;
+  ORT_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
   return std::unique_ptr((IExecutionProvider*)out);
 }
 std::unique_ptr DefaultCudaExecutionProvider() {
 #ifdef USE_CUDA
-  ONNXRuntimeProviderFactoryInterface** f;
-  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateCUDAExecutionProviderFactory(0, &f));
+  OrtProviderFactoryInterface** f;
+  ORT_THROW_ON_ERROR(OrtCreateCUDAExecutionProviderFactory(0, &f));
   FACTORY_PTR_HOLDER;
-  ONNXRuntimeProvider* out;
-  ONNXRUNTIME_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
+  OrtProvider* out;
+  ORT_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
   return std::unique_ptr((IExecutionProvider*)out);
 #else
   return nullptr;
@@ -33,25 +33,25 @@ std::unique_ptr DefaultCudaExecutionProvider() {
 std::unique_ptr DefaultMkldnnExecutionProvider(bool enable_arena) {
 #ifdef USE_MKLDNN
-  ONNXRuntimeProviderFactoryInterface** f;
-  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateMkldnnExecutionProviderFactory(enable_arena ? 1 : 0, &f));
+  OrtProviderFactoryInterface** f;
+  ORT_THROW_ON_ERROR(OrtCreateMkldnnExecutionProviderFactory(enable_arena ? 1 : 0, &f));
   FACTORY_PTR_HOLDER;
-  ONNXRuntimeProvider* out;
-  ONNXRUNTIME_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
+  OrtProvider* out;
+  ORT_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
   return std::unique_ptr((IExecutionProvider*)out);
 #else
-  ONNXRUNTIME_UNUSED_PARAMETER(enable_arena);
+  ORT_UNUSED_PARAMETER(enable_arena);
   return nullptr;
 #endif
 }
 std::unique_ptr DefaultNupharExecutionProvider() {
 #ifdef USE_NUPHAR
-  ONNXRuntimeProviderFactoryInterface** f;
-  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateNupharExecutionProviderFactory(0, "", &f));
+  OrtProviderFactoryInterface** f;
+  ORT_THROW_ON_ERROR(OrtCreateNupharExecutionProviderFactory(0, "", &f));
   FACTORY_PTR_HOLDER;
-  ONNXRuntimeProvider* out;
-  ONNXRUNTIME_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
+  OrtProvider* out;
+  ORT_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
   return std::unique_ptr((IExecutionProvider*)out);
 #else
   return nullptr;
@@ -60,11 +60,11 @@ std::unique_ptr DefaultNupharExecutionProvider() {
 std::unique_ptr DefaultBrainSliceExecutionProvider() {
 #ifdef USE_BRAINSLICE
-  ONNXRuntimeProviderFactoryInterface** f;
-  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateBrainSliceExecutionProviderFactory(0, true, "testdata/firmwares/onnx_rnns/instructions.bin", "testdata/firmwares/onnx_rnns/data.bin", "testdata/firmwares/onnx_rnns/schema.bin", &f));
+  OrtProviderFactoryInterface** f;
+  ORT_THROW_ON_ERROR(OrtCreateBrainSliceExecutionProviderFactory(0, true, "testdata/firmwares/onnx_rnns/instructions.bin", "testdata/firmwares/onnx_rnns/data.bin", "testdata/firmwares/onnx_rnns/schema.bin", &f));
   FACTORY_PTR_HOLDER;
-  ONNXRuntimeProvider* out;
-  ONNXRUNTIME_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
+  OrtProvider* out;
+  ORT_THROW_ON_ERROR((*f)->CreateProvider(f, &out));
   return std::unique_ptr((IExecutionProvider*)out);
 #else
   return nullptr;
diff --git a/onnxruntime/test/util/include/test/test_environment.h b/onnxruntime/test/util/include/test/test_environment.h
index 79dd5413c3232..0d24d82353f27 100644
--- a/onnxruntime/test/util/include/test/test_environment.h
+++ b/onnxruntime/test/util/include/test/test_environment.h
@@ -29,7 +29,7 @@ class TestEnvironment {
   ~TestEnvironment();
 private:
-  ONNXRUNTIME_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TestEnvironment);
+  ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TestEnvironment);
 #ifdef HAVE_FRAMEWORK_LIB
   std::unique_ptr runtime_environment_;
diff --git a/onnxruntime/test/util/include/test_allocator.h b/onnxruntime/test/util/include/test_allocator.h
index 7a0d03088c61a..1ef5929e1d8b2 100644
--- a/onnxruntime/test/util/include/test_allocator.h
+++ b/onnxruntime/test/util/include/test_allocator.h
@@ -7,57 +7,57 @@
 #include "core/session/onnxruntime_cxx_api.h"
 #include
-#define ONNXRUNTIME_ALLOCATOR_IMPL_BEGIN(CLASS_NAME) \
-  class CLASS_NAME { \
-   private: \
-    const ONNXRuntimeAllocatorInteface* vtable_ = &table_; \
-    std::atomic_int ref_count_; \
-    static void* ONNXRUNTIME_API_CALL Alloc_(void* this_ptr, size_t size) { \
-      return ((CLASS_NAME*)this_ptr)->Alloc(size); \
-    } \
-    static void ONNXRUNTIME_API_CALL Free_(void* this_ptr, void* p) { \
-      return ((CLASS_NAME*)this_ptr)->Free(p); \
-    } \
-    static const ONNXRuntimeAllocatorInfo* ONNXRUNTIME_API_CALL Info_(const void* this_ptr) { \
-      return ((const CLASS_NAME*)this_ptr)->Info(); \
-    } \
-    static uint32_t ONNXRUNTIME_API_CALL AddRef_(void* this_) { \
-      CLASS_NAME* this_ptr = (CLASS_NAME*)this_; \
-      return ++this_ptr->ref_count_; \
-    } \
-    static uint32_t ONNXRUNTIME_API_CALL Release_(void* this_) { \
-      CLASS_NAME* this_ptr = (CLASS_NAME*)this_; \
-      uint32_t ret = --this_ptr->ref_count_; \
-      if (ret == 0) \
-        delete this_ptr; \
-      return 0; \
-    } \
-    static ONNXRuntimeAllocatorInteface table_;
+#define ORT_ALLOCATOR_IMPL_BEGIN(CLASS_NAME) \
+  class CLASS_NAME { \
+   private: \
+    const OrtAllocatorInterface* vtable_ = &table_; \
+    std::atomic_int ref_count_; \
+    static void* ORT_API_CALL Alloc_(void* this_ptr, size_t size) { \
+      return ((CLASS_NAME*)this_ptr)->Alloc(size); \
+    } \
+    static void ORT_API_CALL Free_(void* this_ptr, void* p) { \
+      return ((CLASS_NAME*)this_ptr)->Free(p); \
+    } \
+    static const OrtAllocatorInfo* ORT_API_CALL Info_(const void* this_ptr) { \
+      return ((const CLASS_NAME*)this_ptr)->Info(); \
+    } \
+    static uint32_t ORT_API_CALL AddRef_(void* this_) { \
+      CLASS_NAME* this_ptr = (CLASS_NAME*)this_; \
+      return ++this_ptr->ref_count_; \
+    } \
+    static uint32_t ORT_API_CALL Release_(void* this_) { \
+      CLASS_NAME* this_ptr = (CLASS_NAME*)this_; \
+      uint32_t ret = --this_ptr->ref_count_; \
+      if (ret == 0) \
+        delete this_ptr; \
+      return 0; \
+    } \
+    static OrtAllocatorInterface table_;
-#define ONNXRUNTIME_ALLOCATOR_IMPL_END \
-  } \
+#define ORT_ALLOCATOR_IMPL_END \
+  } \
 ;
-ONNXRUNTIME_ALLOCATOR_IMPL_BEGIN(MockedONNXRuntimeAllocator)
+ORT_ALLOCATOR_IMPL_BEGIN(MockedOrtAllocator)
 private:
 std::atomic memory_inuse;
-ONNXRuntimeAllocatorInfo* cpuAllocatorInfo;
-MockedONNXRuntimeAllocator() : ref_count_(1), memory_inuse(0) {
-  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeCreateAllocatorInfo("Cpu", ONNXRuntimeDeviceAllocator, 0, ONNXRuntimeMemTypeDefault, &cpuAllocatorInfo));
+OrtAllocatorInfo* cpuAllocatorInfo;
+MockedOrtAllocator() : ref_count_(1), memory_inuse(0) {
+  ORT_THROW_ON_ERROR(OrtCreateAllocatorInfo("Cpu", OrtDeviceAllocator, 0, OrtMemTypeDefault, &cpuAllocatorInfo));
 }
-~MockedONNXRuntimeAllocator() {
+~MockedOrtAllocator() {
   assert(ref_count_ == 0);
-  ReleaseONNXRuntimeAllocatorInfo(cpuAllocatorInfo);
+  ReleaseOrtAllocatorInfo(cpuAllocatorInfo);
 }
 public:
-MockedONNXRuntimeAllocator(const MockedONNXRuntimeAllocator&) = delete;
-MockedONNXRuntimeAllocator& operator=(const MockedONNXRuntimeAllocator&) = delete;
-ONNXRuntimeAllocatorInteface** Upcast() {
-  return const_cast(&vtable_);
+MockedOrtAllocator(const MockedOrtAllocator&) = delete;
+MockedOrtAllocator& operator=(const MockedOrtAllocator&) = delete;
+OrtAllocatorInterface** Upcast() {
+  return const_cast(&vtable_);
 }
-static ONNXRuntimeAllocatorInteface** Create() {
-  return (ONNXRuntimeAllocatorInteface**)new MockedONNXRuntimeAllocator();
+static OrtAllocatorInterface** Create() {
+  return (OrtAllocatorInterface**)new MockedOrtAllocator();
 }
 void* Alloc(size_t size) {
   constexpr size_t extra_len = sizeof(size_t);
@@ -74,7 +74,7 @@ void Free(void* p) {
   memory_inuse.fetch_sub(len);
   return ::free(p);
 }
-const ONNXRuntimeAllocatorInfo* Info() const {
+const OrtAllocatorInfo* Info() const {
   return cpuAllocatorInfo;
 }
@@ -82,4 +82,4 @@ void LeakCheck() {
   if (memory_inuse.load()) throw std::runtime_error("memory leak!!!");
 }
-ONNXRUNTIME_ALLOCATOR_IMPL_END
+ORT_ALLOCATOR_IMPL_END
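The renamed ORT_ALLOCATOR_IMPL_BEGIN / ORT_ALLOCATOR_IMPL_END macros above supply the OrtAllocatorInterface vtable plumbing (reference counting plus the Alloc_/Free_/Info_ thunks); a class only has to provide Alloc, Free, Info, a ref_count_ initialized to 1, and an out-of-line table_ definition like the one test_allocator.cc gives MockedOrtAllocator below. A stripped-down, malloc-backed variant might look like the sketch here; it is illustration only and not part of the patch, TrivialOrtAllocator is a made-up name, and it assumes the macros above are in scope.

#include <cstdlib>  // ::malloc / ::free; the ORT_ALLOCATOR_IMPL_* macros come from the header above

ORT_ALLOCATOR_IMPL_BEGIN(TrivialOrtAllocator)
private:
OrtAllocatorInfo* cpu_info_;
TrivialOrtAllocator() : ref_count_(1) {
  ORT_THROW_ON_ERROR(OrtCreateAllocatorInfo("Cpu", OrtDeviceAllocator, 0, OrtMemTypeDefault, &cpu_info_));
}
~TrivialOrtAllocator() { ReleaseOrtAllocatorInfo(cpu_info_); }

public:
static OrtAllocatorInterface** Create() { return (OrtAllocatorInterface**)new TrivialOrtAllocator(); }
void* Alloc(size_t size) { return ::malloc(size); }
void Free(void* p) { ::free(p); }
const OrtAllocatorInfo* Info() const { return cpu_info_; }
ORT_ALLOCATOR_IMPL_END

// Out-of-line vtable, mirroring what test_allocator.cc does for MockedOrtAllocator:
OrtAllocatorInterface TrivialOrtAllocator::table_ = {
    {TrivialOrtAllocator::AddRef_, TrivialOrtAllocator::Release_},
    TrivialOrtAllocator::Alloc_, TrivialOrtAllocator::Free_, TrivialOrtAllocator::Info_};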
diff --git a/onnxruntime/test/util/test_allocator.cc b/onnxruntime/test/util/test_allocator.cc
index 026a0ba725f5a..3119042e6dd36 100644
--- a/onnxruntime/test/util/test_allocator.cc
+++ b/onnxruntime/test/util/test_allocator.cc
@@ -3,5 +3,5 @@
 #include "test_allocator.h"
-ONNXRuntimeAllocatorInteface MockedONNXRuntimeAllocator::table_ = {
-    {MockedONNXRuntimeAllocator::AddRef_, MockedONNXRuntimeAllocator::Release_}, MockedONNXRuntimeAllocator::Alloc_, MockedONNXRuntimeAllocator::Free_, MockedONNXRuntimeAllocator::Info_};
+OrtAllocatorInterface MockedOrtAllocator::table_ = {
+    {MockedOrtAllocator::AddRef_, MockedOrtAllocator::Release_}, MockedOrtAllocator::Alloc_, MockedOrtAllocator::Free_, MockedOrtAllocator::Info_};
diff --git a/onnxruntime/test/util/test_environment.cc b/onnxruntime/test/util/test_environment.cc
index 440e2c6bff981..b35d957312bcf 100644
--- a/onnxruntime/test/util/test_environment.cc
+++ b/onnxruntime/test/util/test_environment.cc
@@ -20,14 +20,14 @@ namespace test {
 static std::unique_ptr<::onnxruntime::logging::LoggingManager> s_default_logging_manager;
 ::onnxruntime::logging::LoggingManager& DefaultLoggingManager() {
-  ONNXRUNTIME_ENFORCE(s_default_logging_manager != nullptr,
+  ORT_ENFORCE(s_default_logging_manager != nullptr,
               "Need a TestEnvironment instance to provide the default logging manager.");
   return *s_default_logging_manager;
 }
 TestEnvironment::TestEnvironment(int argc, char** argv, bool create_default_logging_manager) {
-  ONNXRUNTIME_ENFORCE(s_default_logging_manager == nullptr,
+  ORT_ENFORCE(s_default_logging_manager == nullptr,
               "Only expected one instance of TestEnvironment to be created.");
   std::clog << "Initializing unit testing." << std::endl;
@@ -49,7 +49,7 @@ TestEnvironment::TestEnvironment(int argc, char** argv, bool create_default_logg
 #ifdef HAVE_FRAMEWORK_LIB
   auto status = Environment::Create(runtime_environment_);
-  ONNXRUNTIME_ENFORCE(status == Status::OK(), "Failed creating runtime environment. ", status.ErrorMessage());
+  ORT_ENFORCE(status == Status::OK(), "Failed creating runtime environment. ", status.ErrorMessage());
 #endif
 }
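Across default_providers.cc the pattern for standing up an execution provider from a factory is the same for every backend: create the factory with the renamed OrtCreate*ExecutionProviderFactory call, ask it for a provider through its CreateProvider vtable entry, and hand the result to the engine as an IExecutionProvider. A minimal sketch of that shared pattern closes this review; it is illustration only and not part of the patch. FACTORY_PTR_HOLDER's deleter is not visible in this view of the diff, so the sketch releases the factory explicitly with OrtReleaseObject the way test_inference.cc does.

#include <memory>
#include "providers.h"
#include "core/session/onnxruntime_cxx_api.h"

std::unique_ptr<onnxruntime::IExecutionProvider> MakeCpuProvider(bool enable_arena) {
  OrtProviderFactoryInterface** factory;
  ORT_THROW_ON_ERROR(OrtCreateCpuExecutionProviderFactory(enable_arena ? 1 : 0, &factory));

  OrtProvider* provider = nullptr;
  ORT_THROW_ON_ERROR((*factory)->CreateProvider(factory, &provider));
  OrtReleaseObject(factory);  // the factory is no longer needed once the provider exists

  // default_providers.cc casts OrtProvider* to IExecutionProvider* in exactly this way.
  return std::unique_ptr<onnxruntime::IExecutionProvider>((onnxruntime::IExecutionProvider*)provider);
}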