Skip to content

Commit

Permalink
[js/webgpu] following up for JSEP/WebGPU code cleanup (microsoft#15666)
Browse files Browse the repository at this point in the history
### Description
This PR resolves a part of non-critical comments from code review
comments in microsoft#14579.

- use `USE_JSEP` instead of `USE_JS` in build definition to make it less
ambiguous
- remove unused util functions from util.ts
- fix transpose.h
- other misc fixes
  • Loading branch information
fs-eire authored Apr 26, 2023
1 parent 069950d commit a8728c4
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 325 deletions.
6 changes: 4 additions & 2 deletions web/lib/wasm/jsep/backend-webgpu.ts
Original file line number Diff line number Diff line change
Expand Up @@ -144,8 +144,10 @@ export class WebGpuBackend {
}

dispose(): void {
  // Intentionally a no-op. In every known use case the WebGpuBackend is a
  // process-lifetime singleton, so there is no requirement to actually
  // release its resources.
  //
  // Revisit this if a real requirement to dispose the instance appears.
}

getCommandEncoder(): GPUCommandEncoder {
Expand Down
4 changes: 2 additions & 2 deletions web/lib/wasm/jsep/init.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ class TensorViewImpl implements TensorView {
}
}

class OpKernelContext implements ComputeContext {
class ComputeContextImpl implements ComputeContext {
readonly opKernelContext: number;
readonly inputs: readonly TensorView[];
get customData(): {[key: string]: unknown} {
Expand Down Expand Up @@ -142,7 +142,7 @@ export const init = async(module: OrtWasmModule): Promise<void> => {
// jsepRun
(kernel: number, contextDataOffset: number) => {
LOG_DEBUG('verbose', () => `[WebGPU] jsepRun: kernel=${kernel}, contextDataOffset=${contextDataOffset}`);
const context = new OpKernelContext(module, backend, contextDataOffset);
const context = new ComputeContextImpl(module, backend, contextDataOffset);
return backend.computeKernel(kernel, context);
});
}
Expand Down
321 changes: 0 additions & 321 deletions web/lib/wasm/jsep/util.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,46 +4,6 @@
/* eslint-disable no-param-reassign */

export class MatMulUtil {
/**
 * Fix the input shapes for MatMul operation if they need fixing
 * @param dimsA The shape of tensor A. Should be an array of positive integers
 * @param dimsB The shape of tensor B. Should be an array of positive integers
 * @returns A tuple containing the preprocessed input shapes as required by ONNX specifications
 */
static preprocessInputShapes(dimsA: readonly number[], dimsB: readonly number[]):
    [readonly number[], readonly number[]] {
  // Per the ONNX MatMul spec:
  // - a 1-D first input is promoted to a matrix by prepending a 1 to its
  //   dimensions (the prepended 1 is removed after multiplication);
  // - a 1-D second input is promoted by appending a 1 (removed afterwards).
  const shapeA = dimsA.length === 1 ? [1, dimsA[0]] : dimsA;
  const shapeB = dimsB.length === 1 ? [dimsB[0], 1] : dimsB;
  return [shapeA, shapeB];
}

/**
* Fix the output shape computed for MatMul operation if it needs fixing
* @param outputShape The computed outputShape. Should be an array (at least of length 2) of positive integers.
* This will be mutated.
* @param aRank The rank of tensor A.
* @param bRank The rank of tensor B.
*/
static postprocessOutputShape(outputShape: number[], aRank: number, bRank: number): void {
  // Drop the dimension that preprocessInputShapes prepended for a 1-D first input.
  if (aRank === 1) {
    outputShape.splice(outputShape.length - 2, 1);
  }
  // Drop the dimension that was appended for a 1-D second input.
  if (bRank === 1) {
    outputShape.pop();
  }
}

/**
* Calculate the expected shape when matrix multiplication
* @param a The shape of tensor A. Should be a tuple of 2 positive integers
Expand Down Expand Up @@ -102,39 +62,6 @@ export class BroadcastUtil {
return cdims;
}

/**
* Given the indices of a broadcasted tensor, calculate the original indices
* @param broadcastedIndices The given indices of the broadcasted tensor.
* @param originalShape The original shape of the tensor before broadcast
* @returns The calculated indices that maps to the original tensor.
*/
static index(broadcastedIndices: readonly number[], originalShape: readonly number[]): number[] {
  // Assumes broadcastedIndices is valid: same length as the broadcasted
  // shape, with every index in range for its dimension.
  const result: number[] = new Array(originalShape.length);
  BroadcastUtil.fillIndex(broadcastedIndices, originalShape, result);
  return result;
}

/**
 * Given the indices of a broadcasted tensor, calculate the original indices
 * @param broadcastedIndices The given indices of the broadcasted tensor.
 * @param originalShape The original shape of the tensor before broadcast
 * @param originalIndices The mapping of broadcastedIndices to the originalIndices (output parameter - will be
 *     mutated).
 */
static fillIndex(broadcastedIndices: readonly number[], originalShape: readonly number[], originalIndices: number[]):
    void {
  // Assumes broadcastedIndices matches the broadcasted rank with in-range
  // values, and originalIndices has the same length as originalShape.
  // The broadcasted rank is >= the original rank; trailing axes align.
  const rankDiff = broadcastedIndices.length - originalShape.length;
  for (let axis = 0; axis < originalShape.length; axis++) {
    // Modulo folds a broadcasted index back onto a (possibly size-1) original axis.
    originalIndices[axis] = broadcastedIndices[rankDiff + axis] % originalShape[axis];
  }
}

/**
* Determine if a shape is unidirectional broadcastable to another shape
* @param shape The input shape
Expand All @@ -154,27 +81,6 @@ export class BroadcastUtil {
}
return true;
}

/**
 * Determine the broadcasted dims in input shape based on the given output shape.
 * Note that this function only returns the broadcasted dims.
 * @param inputShape The input shape
 * @param outputShape The output shape
 * @returns The broadcasted dims in input shape.
 */
static getBroadcastDims(inputShape: readonly number[], outputShape: readonly number[]): number[] {
  const inputRank = inputShape.length;
  const broadcastDims: number[] = [];
  // Walk axes from the innermost outward, aligning trailing dimensions of
  // the two shapes (standard broadcasting alignment).
  for (let i = 0; i < inputRank; i++) {
    const axis = inputRank - 1 - i;
    // `|| 1` intentionally treats a missing (or zero) dim as 1.
    const inDim = inputShape[axis] || 1;
    const outDim = outputShape[outputShape.length - 1 - i] || 1;
    // An axis is broadcasted when the input is 1 but the output is larger.
    if (outDim > 1 && inDim === 1) {
      broadcastDims.unshift(axis);
    }
  }
  return broadcastDims;
}
}


Expand Down Expand Up @@ -240,38 +146,6 @@ export class ShapeUtil {
return strides;
}

// Reverse the dimensions (a full transpose). Works on a copy so the input
// array is left untouched.
static transpose(dims: readonly number[]): readonly number[] {
  return dims.slice().reverse();
}

// Compute the flat offset for the given indices using the given strides.
// When `axis` is provided, only indices before that axis contribute.
static indicesToOffset(indices: readonly number[], strides: readonly number[], axis?: number): number {
  const limit = axis === undefined ? indices.length : axis;
  let offset = 0;
  for (let i = 0; i < limit; ++i) {
    offset += strides[i] * indices[i];
  }
  return offset;
}

// Convert a flat offset back into per-axis indices for the given strides.
static offsetToIndices(offset: number, strides: readonly number[]): readonly number[] {
  const rank = strides.length;
  if (rank === 0) {
    return [];
  }
  if (rank === 1) {
    return [offset * strides[0]];
  }
  const indices: number[] = new Array(rank);
  let remainder = offset;
  // Peel off one axis at a time; whatever is left belongs to the last axis.
  for (let i = 0; i < rank - 1; ++i) {
    indices[i] = Math.floor(remainder / strides[i]);
    remainder -= indices[i] * strides[i];
  }
  indices[rank - 1] = remainder;
  return indices;
}

/**
* normalize axis of range [-r, r) into [0, r).
*/
Expand All @@ -286,98 +160,6 @@ export class ShapeUtil {
return axes.map(x => this.normalizeAxis(x, tensorRank ?? axes.length));
}

/**
 * Increment an index into a tensor (in lexicographic ordering), wrapping around the specified upper_bound.
 * @param index Given index to increment (Will be mutated)
 * @param dims The dimensions of the tensor for which the given index corresponds to
 * @param axisToIncrementOn The 1-indexed axis to increment on. If undefined, axisToIncrementOn == rank
 * @throws Error for scalar tensors (empty index/dims) or an out-of-range axis.
 */
static incrementIndex(index: number[], dims: readonly number[], axisToIncrementOn?: number): void {
  if (dims.length === 0 || index.length === 0) {
    throw new Error('Index incrementing unsupported for scalar Tensor');
  }
  // Default: increment on the innermost axis (i.e. treat the full rank).
  if (axisToIncrementOn === undefined) {
    axisToIncrementOn = dims.length;
  } else {
    if (axisToIncrementOn <= 0 || axisToIncrementOn > dims.length) {
      throw new Error('Incorrect axis to increment on');
    }
  }

  // Carry-propagate from the chosen axis outward: bump the index; if it
  // stays within its dim we are done, otherwise reset it to 0 and carry
  // into the next-outer axis. Note: incrementing the all-max index wraps
  // around to all zeros.
  for (let k = axisToIncrementOn - 1; k >= 0; --k) {
    index[k]++;
    if (index[k] < dims[k]) {
      break;
    }
    index[k] = 0;
  }
}

/**
 * Produces a new dimensions array based on the values in the 'originalDimensions' and 'shape' array
 * Used in Reshape
 * @param originalDims Original Shape array
 * @param shapeHints array containing values to compute the new dimensions
 * For example:
 * originalDims = [2,2] and shapeHints = [0,-1] will return [2,2]
 * originalDims = [2,2] and shapeHints = [4] will return [4]
 * originalDims = [2,2] and shapeHints = [5] will throw an exception
 * https://github.com/onnx/onnx/blob/main/docs/Operators.md#Reshape
 */

static calculateReshapedDims(originalDims: readonly number[], shapeHints: ArrayLike<number>): number[] {
  // reshape to a Scalar Tensor: only legal when the input has exactly one element.
  if (shapeHints.length === 0) {
    if (originalDims.length === 0 || ShapeUtil.size(originalDims) === 1) {
      return [];
    } else {
      throw new Error('cannot reshape to a scalar Tensor');
    }
  }

  const nDims = shapeHints.length;
  const reshapedDims = new Array<number>(nDims);
  // Per the ONNX Reshape spec: -1 means "infer this dimension" (at most one
  // allowed), 0 means "copy the corresponding input dimension".
  let unknownDimension = -1;
  // Product of all dims resolved so far; the -1 slot is excluded.
  let newTensorSize = 1;
  for (let i = 0; i < nDims; i++) {
    if (shapeHints[i] < -1) {
      throw new Error('a dimension in shape hints cannot be less than -1');
    }
    if (shapeHints[i] === -1) {
      if (unknownDimension !== -1) {
        throw new Error('at most one dimension in shape hints can be -1');
      }
      unknownDimension = i;
    } else {
      if (shapeHints[i] === 0) {
        if (i >= originalDims.length) {
          throw new Error('the dimension with value zero exceeds the dimension size of the input tensor');
        }
        reshapedDims[i] = originalDims[i];
      } else {
        reshapedDims[i] = shapeHints[i];
      }
      newTensorSize *= reshapedDims[i];
    }
  }

  const oldTensorSize = ShapeUtil.size(originalDims);
  if (unknownDimension !== -1) {
    // The inferred dim must make the element counts match exactly.
    // NOTE(review): if any resolved dim is 0, newTensorSize is 0 and the
    // modulo/division below yields NaN — presumably zero-sized tensors are
    // not expected here; confirm with callers.
    if (oldTensorSize % newTensorSize !== 0) {
      throw new Error(`the input tensor cannot be reshaped to the requested shape. Input shape: [${
          originalDims}] Output shape: [${shapeHints}]`);
    }
    reshapedDims[unknownDimension] = oldTensorSize / newTensorSize;
  }
  // validate sizes from originalDims and reshapedDims match
  else {
    if (newTensorSize !== oldTensorSize) {
      throw new Error('reshapedDims and originalDims don\'t have matching sizes');
    }
  }
  return reshapedDims;
}

/**
* Sorts a given array based on the indices in the Perm array
* Used in Transpose
Expand Down Expand Up @@ -413,109 +195,6 @@ export class ShapeUtil {
}
return shape1.every((v, i) => v === shape2[i]);
}

/**
 * Validates if the given `dims` or `shape` is valid in ONNX.js context and returns data size
 * @param dims - input `dims` that needs to be checked
 * @returns the number of elements implied by `dims` (product of all dims)
 * @throws TypeError when rank exceeds 6 or any dim is non-integer, negative, or > INT32_MAX
 */
static validateDimsAndCalcSize(dims: readonly number[]): number {
  if (dims.length > 6) {
    throw new TypeError('Only rank 0 to 6 is supported for tensor shape.');
  }
  let size = 1;
  for (const dim of dims) {
    if (!Number.isInteger(dim)) {
      throw new TypeError(`Invalid shape: ${dim} is not an integer`);
    }
    // 2147483647 is INT32_MAX — dims must fit in a signed 32-bit integer.
    if (dim < 0 || dim > 2147483647) {
      throw new TypeError(`Invalid shape: length ${dim} is not allowed`);
    }
    size *= dim;
  }
  return size;
}

/**
 * Determines the shape of output tensor y = flatten(x, axis)
 * @param dims - shape of input tensor
 * @param axis - flatten axis, in the range [-r, r]
 */
static flattenShape(dims: readonly number[], axis: number): readonly number[] {
  // A negative axis counts from the end.
  const normalizedAxis = axis < 0 ? axis + dims.length : axis;
  const total = dims.reduce((acc, dim) => acc * dim, 1);
  const inner = dims.slice(normalizedAxis).reduce((acc, dim) => acc * dim, 1);
  // Collapse everything before `axis` into the first dim and everything
  // from `axis` onward into the second.
  return [total / inner, inner];
}

/**
 * Determines the shape of output tensor y = squeeze(x, axes)
 * @param dims - shape of input tensor
 * @param axes - squeeze axes
 */
static squeezeShape(dims: readonly number[], axes: readonly number[]): readonly number[] {
  // sanity check / normalize negative axes
  const normalizedAxes = ShapeUtil.normalizeAxes(axes, dims.length);

  const outputDims: number[] = [];
  for (let i = 0; i < dims.length; i++) {
    const shouldSqueeze = normalizedAxes.indexOf(i) >= 0;
    if (shouldSqueeze && dims[i] !== 1) {
      throw new Error('squeeze an axis of size different than 1');
    }
    // With no axes given, squeeze every size-1 dim; otherwise keep every
    // dim that is not in the squeeze list.
    if ((normalizedAxes.length === 0 && dims[i] > 1) || (normalizedAxes.length > 0 && !shouldSqueeze)) {
      outputDims.push(dims[i]);
    }
  }
  return outputDims;
}

/**
 * Determines the shape of output tensor y = unsqueeze(x, axes)
 * @param dims - shape of input tensor
 * @param axes - unsqueeze axes
 * @throws Error when an axis is out of range or duplicated
 */
static unsqueezeShape(dims: readonly number[], axes: readonly number[]): readonly number[] {
  // Output rank = input rank + number of inserted axes.
  const outputDims = new Array<number>(dims.length + axes.length);

  // initialize the array elements to 0 (0 marks "not yet assigned")
  outputDims.fill(0);

  // set all axes indices to 1 in outputDims and check for duplicates
  for (let i = 0; i < axes.length; i++) {
    const axis = ShapeUtil.normalizeAxis(axes[i], outputDims.length);
    if (axis >= outputDims.length) {
      throw new Error('\'axes\' has an out of range axis');
    }
    if (outputDims[axis] !== 0) {
      throw new Error('\'axes\' has a duplicate axis');
    }

    outputDims[axis] = 1;
  }

  // fill in the zero entries of outputDims with the input tensor's shape,
  // preserving the original dimension order
  let inputDimsIterator = 0;
  for (let i = 0; i < outputDims.length; i++) {
    if (outputDims[i] === 0) {
      outputDims[i] = dims[inputDimsIterator++];
    }
  }

  // sanity check assertion. 'inputDimsIterator'
  // should be equal to the length of 'dims'
  if (inputDimsIterator !== dims.length) {
    throw new Error('the unsqueezed dimension could not be established');
  }

  return outputDims;
}
}

export class PoolConvUtil {
Expand Down

0 comments on commit a8728c4

Please sign in to comment.