@@ -35,24 +35,27 @@
 @tvm.testing.parametrize_targets("llvm")
 @pytest.mark.parametrize("shape,axis", [([4, 16], (1,)), ([4, 16, 16], (1, 2))])
 @pytest.mark.parametrize("dtype", ["float32", "float16"])
-def test_rms_norm(target, dev, shape, axis, dtype, episilon=1e-5, rtol=5e-4, atol=5e-4):
+def test_rms_norm(target, dev, shape, axis, dtype, episilon=1e-5, rtol=5e-3, atol=1e-4):
     data = te.placeholder(shape, dtype=dtype, name="data")
     scale_shape = [shape[dim] for dim in axis]
     weight = te.placeholder(scale_shape, dtype=dtype, name="weight")
-    B = topi.nn.rms_norm(data, weight, axis, episilon)
+    bias = te.placeholder(scale_shape, dtype=dtype, name="bias")
+    B = topi.nn.rms_norm(data, weight, bias, axis, episilon)

     data_np = np.random.uniform(size=shape).astype(dtype)
     weight_np = np.random.uniform(size=scale_shape).astype(dtype)
-    b_np = tvm.topi.testing.rms_norm_python(data_np, weight_np, axis, episilon)
+    bias_np = np.random.uniform(size=scale_shape).astype(dtype)
+    b_np = tvm.topi.testing.rms_norm_python(data_np, weight_np, bias_np, axis, episilon)

     with tvm.target.Target(target):
         s_func = tvm.topi.testing.dispatch(target, _rms_norm_schedule)
         s = s_func([B])
         data_tvm = tvm.nd.array(data_np, dev)
         weight_tvm = tvm.nd.array(weight_np, dev)
+        bias_tvm = tvm.nd.array(bias_np, dev)
         b_tvm = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
-        f = tvm.build(s, [data, weight, B], target)
-        f(data_tvm, weight_tvm, b_tvm)
+        f = tvm.build(s, [data, weight, bias, B], target)
+        f(data_tvm, weight_tvm, bias_tvm, b_tvm)
         tvm.testing.assert_allclose(b_tvm.numpy(), b_np, rtol=rtol, atol=atol)
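For reference, the updated test exercises the bias-extended RMS norm. Below is a minimal NumPy sketch of what `tvm.topi.testing.rms_norm_python` is assumed to compute after this change (normalize by the root mean square over `axis`, scale by `weight`, then add `bias`); the helper name `rms_norm_ref` and the float32 accumulation are illustrative assumptions, not the library's implementation.

```python
import numpy as np


def rms_norm_ref(data, weight, bias, axis, epsilon=1e-5):
    """Illustrative NumPy reference for RMS norm with weight and bias.

    Assumed semantics:
        out = data / sqrt(mean(data**2 over `axis`) + epsilon) * weight + bias
    with weight/bias broadcast over the normalized (trailing) axes, matching
    the shape/axis parameterization used in the test above.
    """
    # Accumulate in float32 so float16 inputs stay within the relaxed
    # rtol/atol used by the test (an assumption about the tolerance change).
    x = data.astype("float32")
    sq_mean = np.mean(np.square(x), axis=axis, keepdims=True)
    normed = x / np.sqrt(sq_mean + epsilon)
    out = normed * weight.astype("float32") + bias.astype("float32")
    return out.astype(data.dtype)


# Example matching the first parameterization in the test above.
data = np.random.uniform(size=(4, 16)).astype("float16")
w = np.random.uniform(size=(16,)).astype("float16")
b = np.random.uniform(size=(16,)).astype("float16")
out = rms_norm_ref(data, w, b, axis=(1,))
```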
|
|