diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt index b3d6fade02050..d5ab7f4b74a9a 100644 --- a/cmake/CMakeLists.txt +++ b/cmake/CMakeLists.txt @@ -68,7 +68,7 @@ option(onnxruntime_USE_QNN "Build with QNN support" OFF) option(onnxruntime_USE_SNPE "Build with SNPE support" OFF) option(onnxruntime_USE_RKNPU "Build with RKNPU support" OFF) option(onnxruntime_USE_DNNL "Build with DNNL support" OFF) -option(onnxruntime_USE_JS "Build with JavaScript implemented kernels support" OFF) +option(onnxruntime_USE_JSEP "Build with JavaScript implemented kernels support" OFF) option(onnxruntime_BUILD_UNIT_TESTS "Build ONNXRuntime unit tests" ON) option(onnxruntime_BUILD_CSHARP "Build C# library" OFF) option(onnxruntime_BUILD_OBJC "Build Objective-C library" OFF) @@ -662,9 +662,9 @@ if (onnxruntime_USE_NNAPI_BUILTIN) list(APPEND ORT_PROVIDER_CMAKE_FLAGS -Donnxruntime_USE_NNAPI_BUILTIN=1) list(APPEND ONNXRUNTIME_PROVIDER_NAMES nnapi) endif() -if (onnxruntime_USE_JS) - list(APPEND ORT_PROVIDER_FLAGS -DUSE_JS=1) - list(APPEND ORT_PROVIDER_CMAKE_FLAGS -Donnxruntime_USE_JS=1) +if (onnxruntime_USE_JSEP) + list(APPEND ORT_PROVIDER_FLAGS -DUSE_JSEP=1) + list(APPEND ORT_PROVIDER_CMAKE_FLAGS -Donnxruntime_USE_JSEP=1) list(APPEND ONNXRUNTIME_PROVIDER_NAMES js) endif() if (onnxruntime_USE_QNN) diff --git a/cmake/onnxruntime_providers.cmake b/cmake/onnxruntime_providers.cmake index c253b6b9c7197..caae2aacfd582 100644 --- a/cmake/onnxruntime_providers.cmake +++ b/cmake/onnxruntime_providers.cmake @@ -114,7 +114,7 @@ endif() if(onnxruntime_USE_NNAPI_BUILTIN) set(PROVIDERS_NNAPI onnxruntime_providers_nnapi) endif() -if(onnxruntime_USE_JS) +if(onnxruntime_USE_JSEP) set(PROVIDERS_JS onnxruntime_providers_js) endif() if(onnxruntime_USE_QNN) @@ -1067,8 +1067,8 @@ if (onnxruntime_USE_NNAPI_BUILTIN) endif() endif() -if (onnxruntime_USE_JS) - add_compile_definitions(USE_JS=1) +if (onnxruntime_USE_JSEP) + add_compile_definitions(USE_JSEP=1) file(GLOB_RECURSE onnxruntime_providers_js_cc_srcs 
"${ONNXRUNTIME_ROOT}/core/providers/js/*.h" diff --git a/cmake/onnxruntime_unittests.cmake b/cmake/onnxruntime_unittests.cmake index c0c50e0e11b08..6d506522a91de 100644 --- a/cmake/onnxruntime_unittests.cmake +++ b/cmake/onnxruntime_unittests.cmake @@ -504,7 +504,7 @@ if(onnxruntime_USE_NNAPI_BUILTIN) list(APPEND onnxruntime_test_providers_dependencies onnxruntime_providers_nnapi) endif() -if(onnxruntime_USE_JS) +if(onnxruntime_USE_JSEP) list(APPEND onnxruntime_test_providers_dependencies onnxruntime_providers_js) endif() @@ -609,7 +609,7 @@ if(onnxruntime_USE_NNAPI_BUILTIN) list(APPEND onnxruntime_test_providers_libs onnxruntime_providers_nnapi) endif() -if(onnxruntime_USE_JS) +if(onnxruntime_USE_JSEP) list(APPEND onnxruntime_test_framework_src_patterns ${TEST_SRC_DIR}/providers/js/*) list(APPEND onnxruntime_test_framework_libs onnxruntime_providers_js) list(APPEND onnxruntime_test_providers_dependencies onnxruntime_providers_js) @@ -851,7 +851,7 @@ if (onnxruntime_BUILD_WEBASSEMBLY) if (onnxruntime_ENABLE_WEBASSEMBLY_THREADS) set_property(TARGET onnxruntime_test_all APPEND_STRING PROPERTY LINK_FLAGS " -s USE_PTHREADS=1 -s PROXY_TO_PTHREAD=1") endif() - if (onnxruntime_USE_JS) + if (onnxruntime_USE_JSEP) set_property(TARGET onnxruntime_test_all APPEND_STRING PROPERTY LINK_FLAGS " --pre-js \"${ONNXRUNTIME_ROOT}/wasm/js_internal_api.js\"") endif() endif() diff --git a/cmake/onnxruntime_webassembly.cmake b/cmake/onnxruntime_webassembly.cmake index 80a44ffb3fa63..193315f541b85 100644 --- a/cmake/onnxruntime_webassembly.cmake +++ b/cmake/onnxruntime_webassembly.cmake @@ -199,7 +199,7 @@ else() endif() set(EXPORTED_RUNTIME_METHODS "['stackAlloc','stackRestore','stackSave','UTF8ToString','stringToUTF8','lengthBytesUTF8']") - if (onnxruntime_USE_JS) + if (onnxruntime_USE_JSEP) set(EXPORTED_FUNCTIONS "_malloc,_free,_JsepOutput") else() set(EXPORTED_FUNCTIONS "_malloc,_free") @@ -219,12 +219,12 @@ else() --no-entry ) - if (onnxruntime_USE_JS) + if (onnxruntime_USE_JSEP) # 
NOTE: "-s ASYNCIFY=1" is required for JSEP to work with WebGPU # This flag allows async functions to be called from sync functions, at the cost of binary size and # build time. See https://emscripten.org/docs/porting/asyncify.html for more details. - target_compile_definitions(onnxruntime_webassembly PRIVATE USE_JS=1) + target_compile_definitions(onnxruntime_webassembly PRIVATE USE_JSEP=1) target_link_options(onnxruntime_webassembly PRIVATE --pre-js "${ONNXRUNTIME_ROOT}/wasm/js_internal_api.js" "SHELL:-s ASYNCIFY=1" diff --git a/js/web/lib/wasm/jsep/backend-webgpu.ts b/js/web/lib/wasm/jsep/backend-webgpu.ts index 849e8d3f41bbc..1589562aa9cfd 100644 --- a/js/web/lib/wasm/jsep/backend-webgpu.ts +++ b/js/web/lib/wasm/jsep/backend-webgpu.ts @@ -144,8 +144,10 @@ export class WebGpuBackend { } dispose(): void { - // TODO: uninitialization - // this.glContext.dispose(); + // currently, we do not do anything in this function. In all known use cases, we don't have the requirement to + // actually dispose the WebGpuBackend instance, because it's always used as a singleton. + // + // revisit this place if we get a real requirement to dispose the instance.
} getCommandEncoder(): GPUCommandEncoder { diff --git a/js/web/lib/wasm/jsep/init.ts b/js/web/lib/wasm/jsep/init.ts index 4226a0ef46f57..07247e1648fac 100644 --- a/js/web/lib/wasm/jsep/init.ts +++ b/js/web/lib/wasm/jsep/init.ts @@ -29,7 +29,7 @@ class TensorViewImpl implements TensorView { } } -class OpKernelContext implements ComputeContext { +class ComputeContextImpl implements ComputeContext { readonly opKernelContext: number; readonly inputs: readonly TensorView[]; get customData(): {[key: string]: unknown} { @@ -142,7 +142,7 @@ export const init = async(module: OrtWasmModule): Promise => { // jsepRun (kernel: number, contextDataOffset: number) => { LOG_DEBUG('verbose', () => `[WebGPU] jsepRun: kernel=${kernel}, contextDataOffset=${contextDataOffset}`); - const context = new OpKernelContext(module, backend, contextDataOffset); + const context = new ComputeContextImpl(module, backend, contextDataOffset); return backend.computeKernel(kernel, context); }); } diff --git a/js/web/lib/wasm/jsep/util.ts b/js/web/lib/wasm/jsep/util.ts index cd128ad5e501d..21109fd97d3fc 100644 --- a/js/web/lib/wasm/jsep/util.ts +++ b/js/web/lib/wasm/jsep/util.ts @@ -4,46 +4,6 @@ /* eslint-disable no-param-reassign */ export class MatMulUtil { - /** - * Fix the input shapes for MatMul operation if they need fixing - * @param dimsA The shape of tensor A. Should be an array of positive integers - * @param dimsB The shape of tensor B. Should be an array of positive integers - * @returns A tuple containing the preprocessed input shapes as required by ONNX specifications - */ - static preprocessInputShapes(dimsA: readonly number[], dimsB: readonly number[]): - [readonly number[], readonly number[]] { - // If the first argument is 1-D, it is promoted to a matrix by prepending - // a 1 to its dimensions. After matrix multiplication the prepended 1 is - // removed. - const a = (dimsA.length === 1) ? 
[1, dimsA[0]] : dimsA; - - // If the second argument is 1-D, it is promoted to a matrix by appending - // a 1 to its dimensions. After matrix multiplication the appended 1 is - // removed. - const b = (dimsB.length === 1) ? [dimsB[0], 1] : dimsB; - - return [a, b]; - } - - /** - * Fix the output shape computed for MatMul operation if it needs fixing - * @param outputShape The computed outputShape. Should be an array (atleast of length 2) of positive integers. - * This will be mutated. - * @param aRank The rank of tensor A. - * @param bRank The rank of tensor B. - */ - static postprocessOutputShape(outputShape: number[], aRank: number, bRank: number): void { - // Remove prepended dimension if first input is 1d - if (aRank === 1) { - // outputShape = outputShape.slice(0, outputShape.length - 2).concat(outputShape.slice(outputShape.length - 1)); - outputShape.splice(outputShape.length - 2, 1); - } - // Remove appended dimension if second input is 1d - if (bRank === 1) { - outputShape.pop(); - } - } - /** * Calculate the expected shape when matrix multiplication * @param a The shape of tensor A. Should be a tuple of 2 positive integers @@ -102,39 +62,6 @@ export class BroadcastUtil { return cdims; } - /** - * Given the indices of a broadcasted tensor, calculate the original indices - * @param broadcastedIndices The given indices of the broadcasted tensor. - * @param originalShape The original shape of the tensor before broadcas - * @returns The calculated indices that maps to the original tensor. - */ - static index(broadcastedIndices: readonly number[], originalShape: readonly number[]): number[] { - // NOTE 1: we assume the parameter broadcastedIndices is valid. ie. it should have the same - // length as the broadcasted shape, and for each dimension the index should - // not be out of range. 
- const originalIndices = new Array(originalShape.length); - BroadcastUtil.fillIndex(broadcastedIndices, originalShape, originalIndices); - return originalIndices; - } - - /** - * Given the indices of a broadcasted tensor, calculate the original indices - * @param broadcastedIndices The given indices of the broadcasted tensor. - * @param originalShape The original shape of the tensor before broadcast - * @param originalIndices The mapping of broadcastedIndices to the originalIndices (output parameter - will be - * mutated). - */ - static fillIndex(broadcastedIndices: readonly number[], originalShape: readonly number[], originalIndices: number[]): - void { - // NOTE 1: we assume the parameter broadcastedIndices is valid. ie. it should have the same length as the - // broadcasted shape, and for each dimension the index should not be out of range. - // NOTE 2: we assume the parameter originalIndices has the same length as the originalShape - const dimOffset = broadcastedIndices.length - originalShape.length; - for (let i = 0; i < originalShape.length; i++) { - originalIndices[i] = broadcastedIndices[dimOffset + i] % originalShape[i]; - } - } - /** * Determine if a shape is unidirectional broadcastable to another shape * @param shape The input shape @@ -154,27 +81,6 @@ export class BroadcastUtil { } return true; } - - /** - * Determine the broadcasted dims in input shape based on the given output shape. - * Note that this function only returns the broadcasted dims. - * @param inputShape The input shape - * @param outputShape The output shape - * @returns The broadcasted dims in input shape. 
- */ - static getBroadcastDims(inputShape: readonly number[], outputShape: readonly number[]): number[] { - const inRank = inputShape.length; - const dims: number[] = []; - for (let i = 0; i < inRank; i++) { - const dim = inRank - 1 - i; - const a = inputShape[dim] || 1; - const b = outputShape[outputShape.length - 1 - i] || 1; - if (b > 1 && a === 1) { - dims.unshift(dim); - } - } - return dims; - } } @@ -240,38 +146,6 @@ export class ShapeUtil { return strides; } - static transpose(dims: readonly number[]): readonly number[] { - const copy = dims.slice(); - return copy.reverse(); - } - - static indicesToOffset(indices: readonly number[], strides: readonly number[], axis?: number): number { - if (axis === undefined) { - axis = indices.length; - } - let offset = 0; - for (let i = 0; i < axis; ++i) { - offset += strides[i] * indices[i]; - } - return offset; - } - - static offsetToIndices(offset: number, strides: readonly number[]): readonly number[] { - const rank = strides.length; - if (rank === 0) { - return []; - } else if (rank === 1) { - return [offset * strides[0]]; - } - const indices: number[] = new Array(strides.length); - for (let i = 0; i < indices.length - 1; ++i) { - indices[i] = Math.floor(offset / strides[i]); - offset -= indices[i] * strides[i]; - } - indices[indices.length - 1] = offset; - return indices; - } - /** * normailze axis of range [-r, r) into [0, r). */ @@ -286,98 +160,6 @@ export class ShapeUtil { return axes.map(x => this.normalizeAxis(x, tensorRank ?? axes.length)); } - /** - * Increment an index into a tensor (in lexicographic ordering), wrapping around the specified upper_bound. - * @param index Given index to increment (Will be mutated) - * @param dims The dimensions of the tensor for which the given index corresponds to - * @param axisToIncrementOn The 1-indexed axis to increment on. 
If undefined, axisToIncrementOn == rank - */ - static incrementIndex(index: number[], dims: readonly number[], axisToIncrementOn?: number): void { - if (dims.length === 0 || index.length === 0) { - throw new Error('Index incrementing unsupported for scalar Tensor'); - } - if (axisToIncrementOn === undefined) { - axisToIncrementOn = dims.length; - } else { - if (axisToIncrementOn <= 0 || axisToIncrementOn > dims.length) { - throw new Error('Incorrect axis to increment on'); - } - } - - for (let k = axisToIncrementOn - 1; k >= 0; --k) { - index[k]++; - if (index[k] < dims[k]) { - break; - } - index[k] = 0; - } - } - - /** - * Produces a new dimensions array based on the values in the 'originalDimensions' and 'shape' array - * Used in Reshape - * @param originalDims Original Shape array - * @param shapeHints array containing values to compute the new dimensions - * For example: - * originalDims = [2,2] and shapeHints = [0,-1] will return [2,2] - * originalDims = [2,2] and shapeHints = [4] will return [4] - * originalDims = [2,2] and shapeHints = [5] will throw an exception - * https://github.com/onnx/onnx/blob/main/docs/Operators.md#Reshape - */ - - static calculateReshapedDims(originalDims: readonly number[], shapeHints: ArrayLike): number[] { - // reshape to a Scalar Tensor - if (shapeHints.length === 0) { - if (originalDims.length === 0 || ShapeUtil.size(originalDims) === 1) { - return []; - } else { - throw new Error('cannot reshape to a scalar Tensor'); - } - } - - const nDims = shapeHints.length; - const reshapedDims = new Array(nDims); - let unknownDimension = -1; - let newTensorSize = 1; - for (let i = 0; i < nDims; i++) { - if (shapeHints[i] < -1) { - throw new Error('a dimension in shape hints cannot be less than -1'); - } - if (shapeHints[i] === -1) { - if (unknownDimension !== -1) { - throw new Error('at most one dimension in shape hints can be -1'); - } - unknownDimension = i; - } else { - if (shapeHints[i] === 0) { - if (i >= originalDims.length) { - 
throw new Error('the dimension with value zero exceeds the dimension size of the input tensor'); - } - reshapedDims[i] = originalDims[i]; - } else { - reshapedDims[i] = shapeHints[i]; - } - newTensorSize *= reshapedDims[i]; - } - } - - const oldTensorSize = ShapeUtil.size(originalDims); - if (unknownDimension !== -1) { - if (oldTensorSize % newTensorSize !== 0) { - throw new Error(`the input tensor cannot be reshaped to the requested shape. Input shape: [${ - originalDims}] Output shape: [${shapeHints}]`); - } - reshapedDims[unknownDimension] = oldTensorSize / newTensorSize; - } - // validate sizes from originalDims and reshapedDims match - else { - if (newTensorSize !== oldTensorSize) { - throw new Error('reshapedDims and originalDims don\'t have matching sizes'); - } - } - return reshapedDims; - } - /** * Sorts a given array based on the indices in the Perm array * Used in Transpose @@ -413,109 +195,6 @@ export class ShapeUtil { } return shape1.every((v, i) => v === shape2[i]); } - - /** - * Validates if the given `dims` or `shape` is valid in ONNX.js context and returns data size - * @param dims - input `dims` that needs to be checked - */ - static validateDimsAndCalcSize(dims: readonly number[]): number { - if (dims.length > 6) { - throw new TypeError('Only rank 0 to 6 is supported for tensor shape.'); - } - let size = 1; - for (const n of dims) { - if (!Number.isInteger(n)) { - throw new TypeError(`Invalid shape: ${n} is not an integer`); - } - if (n < 0 || n > 2147483647) { - throw new TypeError(`Invalid shape: length ${n} is not allowed`); - } - size *= n; - } - return size; - } - - /** - * Determines the shape of output tensor y = flatten(x, axis) - * @param dims - shape of input tensor - * @param axis - flatten axis, in the range [-r, r] - */ - static flattenShape(dims: readonly number[], axis: number): readonly number[] { - if (axis < 0) { - axis += dims.length; - } - const total = dims.reduce((x, y) => x * y, 1); - const right = 
dims.slice(axis).reduce((x, y) => x * y, 1); - const outputDims = [total / right, right]; - - return outputDims; - } - - /** - * Determines the shape of output tensor y = squeeze(x, axes) - * @param dims - shape of input tensor - * @param axes - squeeze axes - */ - static squeezeShape(dims: readonly number[], axes: readonly number[]): readonly number[] { - const outputDims = new Array(); - - // sanity check - axes = ShapeUtil.normalizeAxes(axes, dims.length); - - for (let i = 0; i < dims.length; i++) { - const inSqueezeList = axes.indexOf(i) >= 0; - if (inSqueezeList && dims[i] !== 1) { - throw new Error('squeeze an axis of size different than 1'); - } - - if ((axes.length === 0 && dims[i] > 1) || (axes.length > 0 && !inSqueezeList)) { - outputDims.push(dims[i]); - } - } - - return outputDims; - } - - /** - * Determines the shape of output tensor y = unsqueeze(x, axes) - * @param dims - shape of input tensor - * @param axes - unsqueeze axes - */ - static unsqueezeShape(dims: readonly number[], axes: readonly number[]): readonly number[] { - const outputDims = new Array(dims.length + axes.length); - - // initialize the array elements to 0 - outputDims.fill(0); - - // set all axes indices to 1 in outputDims and check for duplicates - for (let i = 0; i < axes.length; i++) { - const axis = ShapeUtil.normalizeAxis(axes[i], outputDims.length); - if (axis >= outputDims.length) { - throw new Error('\'axes\' has an out of range axis'); - } - if (outputDims[axis] !== 0) { - throw new Error('\'axes\' has a duplicate axis'); - } - - outputDims[axis] = 1; - } - - // fill in the zero entries of outputDims with the input tensor's shape - let inputDimsIterator = 0; - for (let i = 0; i < outputDims.length; i++) { - if (outputDims[i] === 0) { - outputDims[i] = dims[inputDimsIterator++]; - } - } - - // sanity check assertion. 
'inputDimsIterator' - // should be equal to the length of 'dims' - if (inputDimsIterator !== dims.length) { - throw new Error('the unsqueezed dimension could not be established'); - } - - return outputDims; - } } export class PoolConvUtil { diff --git a/onnxruntime/core/providers/get_execution_providers.cc b/onnxruntime/core/providers/get_execution_providers.cc index 42cc24a7964d7..f7b372d1eff74 100644 --- a/onnxruntime/core/providers/get_execution_providers.cc +++ b/onnxruntime/core/providers/get_execution_providers.cc @@ -102,7 +102,7 @@ constexpr ProviderInfo kProvidersInPriorityOrder[] = }, { kJsExecutionProvider, -#ifdef USE_JS +#ifdef USE_JSEP true, #else false, diff --git a/onnxruntime/core/providers/js/js_execution_provider.cc b/onnxruntime/core/providers/js/js_execution_provider.cc index d1308da7f888c..df5679bda7db0 100644 --- a/onnxruntime/core/providers/js/js_execution_provider.cc +++ b/onnxruntime/core/providers/js/js_execution_provider.cc @@ -295,7 +295,8 @@ void JsExecutionProvider::RegisterAllocator(AllocatorManager& allocator_manager) if (!cpu_alloc) { AllocatorCreationInfo cpuAllocatorCreationInfo([&](int) { return std::make_unique(); - }); + }, + 0, false); cpu_alloc = CreateAllocator(cpuAllocatorCreationInfo); allocator_manager.InsertAllocator(cpu_alloc); } diff --git a/onnxruntime/core/providers/js/js_execution_provider.h b/onnxruntime/core/providers/js/js_execution_provider.h index ce8ec53eca1f6..3e5621ff78520 100644 --- a/onnxruntime/core/providers/js/js_execution_provider.h +++ b/onnxruntime/core/providers/js/js_execution_provider.h @@ -46,6 +46,8 @@ class JsExecutionProvider : public IExecutionProvider { FusionStyle GetFusionStyle() const override { return FusionStyle::FilteredGraphViewer; } + // JSEP disallows concurrent runs because the actual implementation (e.g. WebGPU backend) relies on global states to work, + // and concurrent runs with async functions may mess up the states and cause undefined behavior. 
bool ConcurrentRunSupported() const override { return false; } }; diff --git a/onnxruntime/core/providers/js/operators/transpose.h b/onnxruntime/core/providers/js/operators/transpose.h index f2214438c6fd1..311badbde0d11 100644 --- a/onnxruntime/core/providers/js/operators/transpose.h +++ b/onnxruntime/core/providers/js/operators/transpose.h @@ -16,17 +16,17 @@ class Transpose final : public JsKernel, public TransposeBase { std::vector perm; if (perm_specified_) { perm.resize(perm_.size()); - perm[0] = gsl::narrow_cast(perm_.size()); for (size_t i = 0; i < perm_.size(); ++i) { perm[i] = gsl::narrow_cast(perm_[i]); } } - // printf("Transpose: perm_specified_ = %d, perm.size() = %d, perm[0] = %d, perm[1] = %d, perm[2] = %d, perm[3] = %d\n", - // perm_specified_, static_cast(perm.size()), perm[0], perm[1], perm[2], perm[3]); JSEP_INIT_KERNEL_ATTRIBUTE(Transpose, ({ "perm" : $1 ? Array.from(HEAP32.subarray($2, $2 + $1)) : [] }), + // $1: length of attribute "perm" (int32[]) gsl::narrow_cast(perm_specified_ ? perm_.size() : 0), + // $2: index to HEAP32 of the first int32 element. calculated by right-shifting the memory + // address by 2 reinterpret_cast(perm_specified_ && !perm.empty() ? 
perm.data() : nullptr) >> 2); } }; diff --git a/onnxruntime/core/providers/provider_factory_creators.h b/onnxruntime/core/providers/provider_factory_creators.h index 261d16a4e8be5..b019ede434b83 100644 --- a/onnxruntime/core/providers/provider_factory_creators.h +++ b/onnxruntime/core/providers/provider_factory_creators.h @@ -46,7 +46,7 @@ #include "core/providers/nnapi/nnapi_provider_factory_creator.h" #endif -#if defined(USE_JS) +#if defined(USE_JSEP) #include "core/providers/js/js_provider_factory_creator.h" #endif @@ -92,4 +92,4 @@ #if defined(USE_AZURE) #include "core/providers/azure/azure_provider_factory_creator.h" -#endif \ No newline at end of file +#endif diff --git a/onnxruntime/core/session/provider_registration.cc b/onnxruntime/core/session/provider_registration.cc index de7c8bd6fb101..8cadccd0ef376 100644 --- a/onnxruntime/core/session/provider_registration.cc +++ b/onnxruntime/core/session/provider_registration.cc @@ -91,7 +91,7 @@ ORT_API_STATUS_IMPL(OrtApis::SessionOptionsAppendExecutionProvider, status = create_not_supported_status(); #endif } else if (strcmp(provider_name, "JS") == 0) { -#if defined(USE_JS) +#if defined(USE_JSEP) options->provider_factories.push_back(JsProviderFactoryCreator::Create(provider_options)); #else status = create_not_supported_status(); diff --git a/onnxruntime/wasm/api.cc b/onnxruntime/wasm/api.cc index 47cb578f7e969..5dd0bd23353fb 100644 --- a/onnxruntime/wasm/api.cc +++ b/onnxruntime/wasm/api.cc @@ -363,11 +363,11 @@ int OrtRun(OrtSession* session, const char** input_names, const ort_tensor_handle_t* inputs, size_t input_count, const char** output_names, size_t output_count, ort_tensor_handle_t* outputs, OrtRunOptions* run_options) { -#if defined(USE_JS) +#if defined(USE_JSEP) EM_ASM({ Module["jsepRunPromise"] = new Promise(function(r) { Module.jsepRunPromiseResolve = r; }); }); #endif auto status_code = CHECK_STATUS(Run, session, run_options, input_names, inputs, input_count, output_names, output_count, outputs); 
-#if defined(USE_JS) +#if defined(USE_JSEP) EM_ASM({ Module.jsepRunPromiseResolve($0); }, status_code); #endif return status_code; diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py index 0bc2b0f3717dc..228f39beae84b 100644 --- a/tools/ci_build/build.py +++ b/tools/ci_build/build.py @@ -483,7 +483,7 @@ def convert_arg_line_to_args(self, arg_line): parser.add_argument( "--nnapi_min_api", type=int, help="Minimum Android API level to enable NNAPI, should be no less than 27" ) - parser.add_argument("--use_js", action="store_true", help="Build with JavaScript kernels.") + parser.add_argument("--use_jsep", action="store_true", help="Build with JavaScript kernels.") parser.add_argument("--use_qnn", action="store_true", help="Build with QNN support.") parser.add_argument("--qnn_home", help="Path to QNN SDK dir.") parser.add_argument("--use_rknpu", action="store_true", help="Build with RKNPU.") @@ -946,7 +946,7 @@ def generate_build_tree( "-Donnxruntime_USE_ARMNN=" + ("ON" if args.use_armnn else "OFF"), "-Donnxruntime_ARMNN_RELU_USE_CPU=" + ("OFF" if args.armnn_relu else "ON"), "-Donnxruntime_ARMNN_BN_USE_CPU=" + ("OFF" if args.armnn_bn else "ON"), - "-Donnxruntime_USE_JS=" + ("ON" if args.use_js else "OFF"), + "-Donnxruntime_USE_JSEP=" + ("ON" if args.use_jsep else "OFF"), # Training related flags "-Donnxruntime_ENABLE_NVTX_PROFILE=" + ("ON" if args.enable_nvtx_profile else "OFF"), "-Donnxruntime_ENABLE_TRAINING=" + ("ON" if args.enable_training else "OFF"), diff --git a/tools/ci_build/github/azure-pipelines/templates/win-wasm-ci.yml b/tools/ci_build/github/azure-pipelines/templates/win-wasm-ci.yml index 4ec339bb0fb81..786d7b77a076c 100644 --- a/tools/ci_build/github/azure-pipelines/templates/win-wasm-ci.yml +++ b/tools/ci_build/github/azure-pipelines/templates/win-wasm-ci.yml @@ -104,7 +104,7 @@ jobs: displayName: 'Build (simd + JSEP)' inputs: scriptPath: '$(Build.SourcesDirectory)\tools\ci_build\build.py' - arguments: '$(CommonBuildArgs) --build_dir 
$(Build.BinariesDirectory)\wasm_simd_jsep --enable_wasm_simd --use_js --target onnxruntime_webassembly' + arguments: '$(CommonBuildArgs) --build_dir $(Build.BinariesDirectory)\wasm_simd_jsep --enable_wasm_simd --use_jsep --target onnxruntime_webassembly' workingDirectory: '$(Build.BinariesDirectory)' - ${{ if eq(parameters.SkipPublish, false) }}: - script: |