This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Disables failing tests due to cuDNN
perdasilva committed Mar 27, 2019
1 parent 29578b5 commit c01d5f7
Showing 2 changed files with 2 additions and 0 deletions.
1 change: 1 addition & 0 deletions tests/python/gpu/test_gluon_gpu.py
@@ -227,6 +227,7 @@ def test_rnn_layer_begin_state_type():
modeling_layer(fake_data)


@unittest.skip("test fails due to cuDNN arch mismatch. temporarily disabled until it is fixed. See https://github.com/apache/incubator-mxnet/issues/14502")
def test_gluon_ctc_consistency():
loss = mx.gluon.loss.CTCLoss()
data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
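For context, a minimal, self-contained sketch (not part of the commit; names are illustrative) of what the added @unittest.skip decorator does: the runner never executes the decorated test body and reports the test as skipped with the given reason.

import unittest


class SkipSketch(unittest.TestCase):

    @unittest.skip("temporarily disabled, see the tracking issue")
    def test_disabled(self):
        # This would fail, but the decorator skips the test before the body runs.
        self.fail("never reached")

    def test_enabled(self):
        # Unrelated tests in the same class still run normally.
        self.assertEqual(1 + 1, 2)


if __name__ == "__main__":
    unittest.main(verbosity=2)

Running this reports test_disabled as skipped and test_enabled as passing.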
1 change: 1 addition & 0 deletions tests/python/unittest/test_gluon_rnn.py
@@ -541,6 +541,7 @@ def test_rnn_layers_fp32():

@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@unittest.skipIf(mx.context.num_gpus() == 0, "RNN FP16 only implemented for GPU for now")
@unittest.skip("test fails due to cuDNN arch mismatch. temporarily disabled until it is fixed. See https://github.com/apache/incubator-mxnet/issues/14502")
def test_rnn_layers_fp16():
run_rnn_layers('float16', 'float32', mx.gpu())

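The second hunk stacks the unconditional skip on top of an existing conditional @unittest.skipIf. A minimal sketch (hypothetical flag, not from the commit) of how stacked skip decorators interact: the test is skipped whenever any of the stacked decorators applies, so the unconditional @unittest.skip keeps the test disabled even on machines where the GPU check would otherwise let it run.

import unittest

HAVE_GPU = True  # hypothetical stand-in for the mx.context.num_gpus() check


class StackedSkipSketch(unittest.TestCase):

    @unittest.skipIf(not HAVE_GPU, "only implemented for GPU for now")
    @unittest.skip("temporarily disabled, see the tracking issue")
    def test_fp16_path(self):
        # Never runs while the unconditional skip is in place, GPU or not.
        self.fail("never reached")


if __name__ == "__main__":
    unittest.main(verbosity=2)

Removing the @unittest.skip line later restores the original behavior: the test runs on GPU machines and is skipped elsewhere by the conditional decorator.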
