This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Address cr
reminisce committed Mar 25, 2018
1 parent d9f2068 commit 0a943c3
Showing 1 changed file with 3 additions and 0 deletions.
tests/python/quantization/test_quantization.py: 3 additions & 0 deletions
@@ -121,6 +121,7 @@ def check_requantize(shape, min_calib_range=None, max_calib_range=None):
 @with_seed()
 def test_quantized_conv():
     if mx.current_context().device_type != 'gpu':
+        print('skipped testing quantized_conv on cpu since it is not implemented yet')
         return
 
     def check_quantized_conv(data_shape, kernel, num_filter, pad, stride, no_bias):
@@ -191,6 +192,7 @@ def check_quantized_conv(data_shape, kernel, num_filter, pad, stride, no_bias):
 @with_seed()
 def test_quantized_pooling():
     if mx.current_context().device_type != 'gpu':
+        print('skipped testing quantized_pooling on cpu since it is not implemented yet')
         return
 
     def check_quantized_pooling(data_shape, kernel, pool_type, pad, stride, global_pool):
@@ -236,6 +238,7 @@ def check_quantized_pooling(data_shape, kernel, pool_type, pad, stride, global_p
 @with_seed()
 def test_quantized_fc():
     if mx.current_context().device_type != 'gpu':
+        print('skipped testing quantized_fc on cpu since it is not implemented yet')
         return
 
     def check_quantized_fc(data_shape, num_hidden, no_bias, flatten=True):
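For readers without the full test file open, below is a minimal sketch of the guard that each added print line sits inside, assuming mxnet is importable; the @with_seed decorator and the nested check_* helpers from the original test module are omitted here for brevity.

import mxnet as mx

def test_quantized_conv():
    # The quantized conv operator only has a GPU implementation at this point,
    # so on any other context the test reports why it is skipping and returns early.
    if mx.current_context().device_type != 'gpu':
        print('skipped testing quantized_conv on cpu since it is not implemented yet')
        return
    # ... the actual numerical checks (check_quantized_conv) run only on GPU ...

The same pattern is repeated in test_quantized_pooling and test_quantized_fc with their respective skip messages.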
