Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Make convolution operator fully work with oneDNN v2.4+ (#20847)
Browse files Browse the repository at this point in the history
* Restore full functionality to convolution

* Update src/operator/nn/dnnl/dnnl_convolution.cc

Co-authored-by: bgawrych <[email protected]>

Co-authored-by: bgawrych <[email protected]>
  • Loading branch information
bartekkuncer and bgawrych authored Feb 1, 2022
1 parent e9840b8 commit bdcf137
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 8 deletions.
16 changes: 12 additions & 4 deletions src/operator/nn/dnnl/dnnl_convolution.cc
Original file line number Diff line number Diff line change
Expand Up @@ -118,10 +118,18 @@ std::shared_ptr<dnnl::convolution_forward::primitive_desc> GetConvFwdImpl(
// suboptimal kernel for computation that has the expected memory size requirements
auto conv_pd =
std::make_shared<dnnl::convolution_forward::primitive_desc>(desc, attr, engine);
while (conv_pd->dst_desc().get_size() != GetArraySize(output) ||
conv_pd->src_desc().get_size() != GetArraySize(data) ||
(!param.dnnl_param.quantized &&
conv_pd->weights_desc().get_size() != GetArraySize(weights))) {
while (
conv_pd->dst_desc().get_size() != GetArraySize(output) ||
conv_pd->src_desc().get_size() != GetArraySize(data) ||
(!param.dnnl_param.quantized &&
conv_pd->weights_desc().get_size() != GetArraySize(weights)) ||
// With the upgrade of oneDNN to version 2.4+
// tests/python/dnnl/subgraphs/test_conv_subgraph.py::test_pos_conv_add[True-data_shape1]
// started failing. Switching away from primitive with weight dnnl::format_tag
// ABcd4b16a4b in order to temporarily fix the issue until full fix arrives.
// Tracking issue: https://github.com/apache/incubator-mxnet/issues/20826.
(param.dnnl_param.quantized && conv_pd->weights_desc().dims()[1] < 4 &&
conv_pd->weights_desc().data.padded_dims[1] == 16)) {
// next_impl() will visit desc and engine, please make sure they are still alive here.
CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
}
Expand Down
5 changes: 1 addition & 4 deletions tests/python/dnnl/subgraphs/subgraph_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,7 @@
}
}

DATA_SHAPE=[(64, 4, 10, 10), (4, 4, 24, 24), (1, 16, 32, 32)]
# Second shape has been temporarily changed from (4, 3, 24, 24) to (4, 4, 24, 24) due to
# a bug regarding conv+sum fuse with the amount of input channels < 4. It will be reverted
# as soon as the problem is fixed. Issue: https://github.com/apache/incubator-mxnet/issues/20826.
DATA_SHAPE=[(64, 4, 10, 10), (4, 3, 24, 24), (1, 16, 32, 32)]

# Helpers
class RELU6(nn.HybridBlock):
Expand Down

0 comments on commit bdcf137

Please sign in to comment.