Commit 68fc83e

Run lintrunner & remove nhwc hack.
1 parent a9140e8 commit 68fc83e

4 files changed: +8 −9 lines

onnxruntime/contrib_ops/cuda/grid_sample.cc (+4 −4)

@@ -9,11 +9,11 @@ namespace onnxruntime {
 namespace contrib {
 namespace cuda {

-#define REGISTER_KERNEL_TYPED(T, VERSION, LAYOUT, DOMAIN) \
+#define REGISTER_KERNEL_TYPED(T, VERSION, LAYOUT, DOMAIN) \
   ONNX_OPERATOR_TYPED_KERNEL_EX( \
       GridSample, \
       DOMAIN, \
-      VERSION, \
+      VERSION, \
       T, \
       kCudaExecutionProvider, \
       (*KernelDefBuilder::Create()) \
@@ -68,7 +68,7 @@ Status GridSample<T, IsNHWC>::ComputeInternal(OpKernelContext* context) const {
   dims_output[Ch::N] = dims_input[Ch::N];
   dims_output[Ch::C] = dims_input[Ch::C];
   dims_output[Ch::H] = dims_grid[1 /* Grid::H */];
-  dims_output[Ch::W] = dims_grid[2 /* Grid::W */];
+  dims_output[Ch::W] = dims_grid[2 /* Grid::W */];
   Tensor* Y = context->Output(0, dims_output);
   // Return early if the output tensor is going to be of size 0
   if (Y->Shape().Size() == 0) {
@@ -94,6 +94,6 @@ Status GridSample<T, IsNHWC>::ComputeInternal(OpKernelContext* context) const {
 } // namespace contrib

 namespace cuda {
-REGISTER_KERNEL_TYPED(float, 16, LAYOUT_NCHW, kOnnxDomain)
+REGISTER_KERNEL_TYPED(float, 16, LAYOUT_NCHW, kOnnxDomain)
 } // namespace cuda
 } // namespace onnxruntime
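
The registration macro takes the layout and the domain as parameters, so the NCHW registration above (opset 16, kOnnxDomain) and a channels-last registration differ only in those two arguments. As a rough sketch of what a companion NHWC registration could look like; LAYOUT_NHWC and kMSInternalNHWCDomain are assumed here and are not shown in this commit:

// Sketch only: a hypothetical channels-last counterpart to the NCHW
// registration above. LAYOUT_NHWC and kMSInternalNHWCDomain are assumed
// to exist in the CUDA EP headers; neither appears in this commit.
namespace onnxruntime {
namespace contrib {
namespace cuda {

REGISTER_KERNEL_TYPED(float, 16, LAYOUT_NHWC, kMSInternalNHWCDomain)

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime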

onnxruntime/core/providers/cuda/cuda_provider_factory.cc (−3)

@@ -219,9 +219,6 @@ struct CUDA_Provider : Provider {
   info.cudnn_conv_use_max_workspace = params->cudnn_conv_use_max_workspace != 0;
   info.enable_cuda_graph = params->enable_cuda_graph != 0;
   info.prefer_nhwc = params->prefer_nhwc;
-  // HACK
-  info.prefer_nhwc = true;
-  //info.prefer_nhwc = false;
   info.cudnn_conv1d_pad_to_nc1d = params->cudnn_conv1d_pad_to_nc1d != 0;
   info.tunable_op.enable = params->tunable_op_enable;
   info.tunable_op.tuning_enable = params->tunable_op_tuning_enable;
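
With the hardcoded override gone, info.prefer_nhwc comes solely from the provider options supplied by the caller. A minimal sketch of enabling it from application code through the C/C++ API, assuming the CUDA EP exposes the setting under the provider-option key "prefer_nhwc" (the key string is not confirmed by this commit):

// Sketch only: request the channels-last path via provider options instead of
// a hardcoded override. The "prefer_nhwc" key is an assumption; only the
// params->prefer_nhwc field is visible in this commit.
#include <onnxruntime_cxx_api.h>

Ort::SessionOptions MakeCudaSessionOptionsPreferNhwc() {
  Ort::SessionOptions session_options;

  OrtCUDAProviderOptionsV2* cuda_options = nullptr;
  Ort::ThrowOnError(Ort::GetApi().CreateCUDAProviderOptions(&cuda_options));

  const char* keys[] = {"prefer_nhwc"};
  const char* values[] = {"1"};
  Ort::ThrowOnError(
      Ort::GetApi().UpdateCUDAProviderOptions(cuda_options, keys, values, 1));

  // The V2 options are what ultimately feed the params->prefer_nhwc read above.
  session_options.AppendExecutionProvider_CUDA_V2(*cuda_options);
  Ort::GetApi().ReleaseCUDAProviderOptions(cuda_options);
  return session_options;
}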

onnxruntime/core/providers/cuda/shared_inc/cuda_utils.h (+1 −1)

@@ -168,7 +168,7 @@ struct NumericLimits<double> {
   }
 };

-// TODO Where to put this? good places might be
+// TODO Where to put this? good places might be
 // core/framework/tensor_shape.h
 // core/util/matrix_layout.h

onnxruntime/test/providers/cpu/tensor/grid_sample_test_gen.py (+3 −1)

@@ -76,6 +76,8 @@
 print('test.AddAttribute("padding_mode", padding_mode);')
 print('test.AddAttribute("align_corners", align_corners);')
 print('test.AddOutput<float>("Y", Y_shape, Y_data);')
-print(f'test.Run(OpTester::ExpectResult::kExpectSuccess, "", GetExcludedExecutionProviders({opset_version}));')
+print(
+    f'test.Run(OpTester::ExpectResult::kExpectSuccess, "", GetExcludedExecutionProviders({opset_version}));'
+)
 print("}")
 print("\n")
