From afc8606dcc15dc451efdb9345f5bed3ca04cad0b Mon Sep 17 00:00:00 2001
From: Anirudh Subramanian
Date: Fri, 9 Aug 2019 19:29:09 +0000
Subject: [PATCH 1/5] Fix ConcatType and add test

---
 src/operator/nn/concat.cc            | 33 +++++++++++++++++++++++----------
 tests/python/gpu/test_contrib_amp.py | 12 +++++++++++-
 2 files changed, 34 insertions(+), 11 deletions(-)

diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index 80469b5385eb..6b5e4d6f486f 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -144,6 +144,7 @@ bool ConcatType(const nnvm::NodeAttrs& attrs,
   const ConcatParam& param_ = nnvm::get<ConcatParam>(attrs.parsed);
   int dtype = -1;
 
+  // checks uniformity of input
   for (int i : *in_type) {
     if (dtype == -1) {
       dtype = i;
@@ -154,18 +155,30 @@ bool ConcatType(const nnvm::NodeAttrs& attrs,
     }
   }
 
-  if (dtype == -1) {
-    LOG(FATAL) << "Not enough information to infer type in Concat.";
-    return false;
-  }
-
   size_t nin = param_.num_args;
-  in_type->clear();
-  for (size_t i = 0; i < nin; ++i) in_type->push_back(dtype);
-
-  out_type->clear();
-  out_type->push_back(dtype);
+  // if in types are known out types are unknown
+  if (dtype != -1 && (*out_type)[0] == -1) {
+    (*out_type)[0] = dtype;
+    in_type->clear();
+    for (size_t i = 0; i < nin; ++i) {
+      in_type->push_back(dtype);
+    }
+  // if out types are known in types are unknown
+  } else if ((*out_type)[0] != -1 && dtype == -1) {
+    in_type->clear();
+    for (size_t i = 0; i < nin; ++i) {
+      in_type->push_back((*out_type)[0]);
+    }
+  // if both out_types and in_types are known, and different
+  } else if ((*out_type)[0] != -1 && dtype != -1 && ((*out_type)[0] != dtype)) {
+    std::ostringstream os;
+    os << "Type inconsistent, Provided = "
+       << mxnet::op::type_string((*out_type)[0]) << ','
+       << " inferred type = " << mxnet::op::type_string(dtype);
+    throw mxnet::op::InferTypeError(os.str(), 0);
+    return false;
+  }
   return true;
 }
diff --git a/tests/python/gpu/test_contrib_amp.py b/tests/python/gpu/test_contrib_amp.py
index 7927cc99160b..0bc605ac0128 100644
--- a/tests/python/gpu/test_contrib_amp.py
+++ b/tests/python/gpu/test_contrib_amp.py
@@ -26,7 +26,7 @@
 from nose.tools import assert_raises
 from mxnet.test_utils import set_default_context, download_model, same_symbol_structure
 from mxnet.gluon.model_zoo.vision import get_model
-from mxnet.gluon import SymbolBlock
+from mxnet.gluon import SymbolBlock, nn, rnn
 from mxnet.contrib.amp import amp
 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../unittest'))
@@ -300,6 +300,16 @@ def check_amp_convert_hybrid_block():
         params = converted_model.collect_params()
         assert params["stage2_unit1_conv2_weight"].dtype == np.float16
 
+        model = nn.HybridSequential()
+        model.add(rnn.LSTM(hidden_size=10,num_layers=2,bidirectional=True))
+        model.add(nn.Dense(2))
+        model.initialize()
+        model.hybridize()
+        out = model(mx.nd.ones((2, 3, 4)))
+        new_model = amp.convert_hybrid_block(model)
+        out2 = new_model(mx.nd.ones((2, 3, 4)))
+        mx.test_utils.assert_almost_equal(out.asnumpy(), out2.asnumpy(), atol=1e-2, rtol=1e-2)
+
     with mx.Context(mx.gpu(0)):
         check_amp_convert_symbol()
         check_amp_convert_model()
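Note on the ConcatType change above: type inference for Concat is now bidirectional, so a known output dtype can fill in unknown input dtypes and vice versa. The forward direction is easy to exercise from the Python frontend; the sketch below is illustrative only (the symbol names are made up and are not part of the patch):

    import numpy as np
    import mxnet as mx

    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')
    c = mx.sym.concat(a, b, dim=1)

    # One known input dtype is propagated to the remaining input
    # and to the output.
    arg_types, out_types, _ = c.infer_type(a=np.float16)
    print(arg_types)  # [<class 'numpy.float16'>, <class 'numpy.float16'>]
    print(out_types)  # [<class 'numpy.float16'>]

The backward direction (output dtype known, input dtypes unknown) is the case AMP hits when it re-infers types after inserting casts; previously that path died with "Not enough information to infer type in Concat", which is what the bidirectional LSTM test above reproduces.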
From 7bc0b4defaa0deca9330e8825d7060dad6c1e31f Mon Sep 17 00:00:00 2001
From: Anirudh Subramanian
Date: Fri, 9 Aug 2019 19:30:38 +0000
Subject: [PATCH 2/5] Remove return false

---
 src/operator/nn/concat.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index 6b5e4d6f486f..88c97b4aa3c0 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -177,7 +177,6 @@ bool ConcatType(const nnvm::NodeAttrs& attrs,
        << mxnet::op::type_string((*out_type)[0]) << ','
        << " inferred type = " << mxnet::op::type_string(dtype);
     throw mxnet::op::InferTypeError(os.str(), 0);
-    return false;
   }
   return true;
 }

From 67ba70edc1f339761a97b76396772b1ccc63cd69 Mon Sep 17 00:00:00 2001
From: Anirudh Subramanian
Date: Fri, 9 Aug 2019 20:33:50 +0000
Subject: [PATCH 3/5] Change error message

---
 src/operator/nn/concat.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index 88c97b4aa3c0..9e016bf884f2 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -173,7 +173,7 @@ bool ConcatType(const nnvm::NodeAttrs& attrs,
   // if both out_types and in_types are known, and different
   } else if ((*out_type)[0] != -1 && dtype != -1 && ((*out_type)[0] != dtype)) {
     std::ostringstream os;
-    os << "Type inconsistent, Provided = "
+    os << "Type inconsistent, Provided output type = "
        << mxnet::op::type_string((*out_type)[0]) << ','
        << " inferred type = " << mxnet::op::type_string(dtype);
     throw mxnet::op::InferTypeError(os.str(), 0);

From 411d88ef49a73ce6590f92b32f49fe8ece24d2c2 Mon Sep 17 00:00:00 2001
From: Anirudh Subramanian
Date: Mon, 12 Aug 2019 22:11:11 +0000
Subject: [PATCH 4/5] Run RNN test only when CUDNN enabled

---
 tests/python/gpu/test_contrib_amp.py | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/tests/python/gpu/test_contrib_amp.py b/tests/python/gpu/test_contrib_amp.py
index 0bc605ac0128..d7bef67af3e1 100644
--- a/tests/python/gpu/test_contrib_amp.py
+++ b/tests/python/gpu/test_contrib_amp.py
@@ -30,7 +30,7 @@
 from mxnet.contrib.amp import amp
 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../unittest'))
-from common import with_seed, teardown
+from common import with_seed, teardown, assert_raises_cudnn_not_satisfied
 
 def test_amp_coverage():
     conditional = [item[0] for item in amp.lists.symbol.CONDITIONAL_FP32_FUNCS]
@@ -300,8 +300,17 @@ def check_amp_convert_hybrid_block():
         params = converted_model.collect_params()
         assert params["stage2_unit1_conv2_weight"].dtype == np.float16
 
+    with mx.Context(mx.gpu(0)):
+        check_amp_convert_symbol()
+        check_amp_convert_model()
+        check_amp_convert_hybrid_block()
+
+@with_seed()
+@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
+def test_amp_conversion_rnn():
+    with mx.Context(mx.gpu(0)):
         model = nn.HybridSequential()
-        model.add(rnn.LSTM(hidden_size=10,num_layers=2,bidirectional=True))
+        model.add(rnn.LSTM(hidden_size=10, num_layers=2, bidirectional=True))
         model.add(nn.Dense(2))
         model.initialize()
         model.hybridize()
@@ -310,11 +319,6 @@ def check_amp_convert_hybrid_block():
         out2 = new_model(mx.nd.ones((2, 3, 4)))
         mx.test_utils.assert_almost_equal(out.asnumpy(), out2.asnumpy(), atol=1e-2, rtol=1e-2)
 
-    with mx.Context(mx.gpu(0)):
-        check_amp_convert_symbol()
-        check_amp_convert_model()
-        check_amp_convert_hybrid_block()
-
 
 @with_seed()
 def test_module_backward_compatibility():
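One caveat on the new ConcatType branches: inputs that disagree with each other (rather than with the output) are still rejected by the pre-existing uniformity loop that patch 1 annotates; the new branches only reconcile inputs with the output. A minimal sketch of the conflict case, again with made-up symbol names (the exact error text depends on the build):

    import numpy as np
    import mxnet as mx

    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')
    c = mx.sym.concat(a, b, dim=1)

    # Two conflicting known input dtypes fail Concat's uniformity check.
    try:
        c.infer_type(a=np.float16, b=np.float32)
    except mx.base.MXNetError as err:
        print('Concat rejected mixed input dtypes:', str(err).splitlines()[0])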
From 9024b2fc36857d32e652e2c579a6ed65ce5e7d55 Mon Sep 17 00:00:00 2001
From: Anirudh Subramanian
Date: Wed, 14 Aug 2019 21:34:55 +0000
Subject: [PATCH 5/5] set default context for test_contrib_amp

---
 tests/python/gpu/test_contrib_amp.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/python/gpu/test_contrib_amp.py b/tests/python/gpu/test_contrib_amp.py
index d7bef67af3e1..3daab0f7bb6a 100644
--- a/tests/python/gpu/test_contrib_amp.py
+++ b/tests/python/gpu/test_contrib_amp.py
@@ -31,6 +31,7 @@
 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../unittest'))
 from common import with_seed, teardown, assert_raises_cudnn_not_satisfied
+set_default_context(mx.gpu(0))
 
 def test_amp_coverage():
     conditional = [item[0] for item in amp.lists.symbol.CONDITIONAL_FP32_FUNCS]
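A closing note on patch 5: set_default_context changes the device used when no ctx argument is given, so everything in the test module that creates NDArrays without an explicit context now runs on the GPU. A minimal illustration of the effect (assumes a CUDA build and an available GPU; not part of the patch):

    import mxnet as mx
    from mxnet.test_utils import set_default_context

    set_default_context(mx.gpu(0))

    # NDArrays created without an explicit ctx now land on gpu(0).
    x = mx.nd.ones((2, 2))
    assert x.context == mx.gpu(0)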