float32 -> float16 cast consistency across implementations (#13857)
* Added test showing float32->float16 discrepancy when mshadow float2half() is used.

* Temp update mshadow submodule SHA to point to PR368 (b211cb7).

* Temp switch to url = https://github.com/DickJC123/mshadow.git

* Update mshadow submodule SHA.

* Improve code style per reviewer comments.

* Move back to dmlc/mshadow.git, now with float->half rounding.

* Expand test_operator.py:test_cast_float32_to_float16 to test np.nan.
DickJC123 authored and szha committed Jan 30, 2019
1 parent 36a3cb8 commit c939c2d
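
For context, this fix pins MXNet's float32->float16 cast to the round-to-nearest-even
policy that numpy follows. A minimal standalone sketch of what that policy means at a
float16 tie point (plain numpy; this sketch is not part of the commit's diff):

import numpy as np

# 1.0 + 2**-11 in float32 lies exactly halfway between the adjacent float16
# values 1.0 and 1.0 + 2**-10 (float16 carries 10 fraction bits).
tie = np.float32(1.0 + 2.0**-11)

# Round-to-nearest-even resolves the tie toward the neighbor with the even
# mantissa, here 1.0. Casting via float64 first rounds only once, since every
# float32 is exactly representable in float64.
assert tie.astype(np.float64).astype(np.float16) == np.float16(1.0)

# One float32 ulp above the tie is strictly closer to the upper neighbor.
above = np.float32(1.0 + 2.0**-11 + 2.0**-23)
assert above.astype(np.float64).astype(np.float16) == np.float16(1.0 + 2.0**-10)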
Showing 2 changed files with 43 additions and 1 deletion.
3rdparty/mshadow: 2 changes (1 addition, 1 deletion)
Submodule mshadow updated 1 file:
+94 −28 mshadow/half.h
tests/python/unittest/test_operator.py: 42 changes (42 additions, 0 deletions)
@@ -4000,6 +4000,48 @@ def test_cast():
    assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)


# Test requires all platforms to round float32->float16 with the same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
    FP16_FRACTION_BITS = 10
    FP32_FRACTION_BITS = 23
    FP32_EXP_MIN = -126
    FP32_EXP_MAX = 127
    # generate test cases in the vicinity of representable float16 mantissas
    # and mid-way between them, but over the full range of float32 exponents.
    def get_data():
        for sign_bit in [0, 1]:
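            # The exponent range reaches below FP32_EXP_MIN to exercise float32
            # subnormals and underflow-to-zero, and one step past FP32_EXP_MAX
            # so some inputs overflow to inf.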
            for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
                denominator = 2**(FP16_FRACTION_BITS + 1)
                for numerator in range(0, denominator):
                    fraction = numerator / float(denominator)
                    for y in [-1.0, 0.0, 1.0]:
                        small_delta = y / 2**FP32_FRACTION_BITS
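                        # An even numerator lands exactly on a float16-representable
                        # mantissa; an odd one lands on the exact midpoint between two
                        # of them, where the round-to-nearest-even tie-break decides.
                        # small_delta then nudges the value one float32 ulp to either
                        # side of that point (y = 0 stays exactly on it).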
                        val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
                        yield val
        # Add np.nan as a final data value to process
        yield np.nan

    input_np = np.array(list(get_data())).astype(np.float32)
    # The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
    # as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
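    # (Going through float64 is harmless: every float32 is exactly representable
    # as a float64, so the extra hop introduces no rounding of its own.)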
    expected_output = input_np.astype(np.float64).astype(np.float16)

    x = mx.sym.Variable('x', dtype=np.float32)
    sym = mx.sym.Cast(x, dtype=np.float16)
    ctx = default_context()
    exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
    assert exe.arg_arrays[0].dtype == np.float32
    assert exe.outputs[0].dtype == np.float16
    exe.forward(is_train=False)
    sym_output = exe.outputs[0].asnumpy()
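    # NaN compares unequal to everything, itself included, so NaN agreement
    # between the model output and numpy is checked explicitly below.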
    for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
        assert (model_fp16_val == np_fp16_val) or \
               (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
            'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
                fp32_val, model_fp16_val, np_fp16_val)


@with_seed()
def test_repeat():
    def test_repeat_forward():
