[AMP OP&Test] Append bf16/fp16 support 4 elementwise_max #51151

Merged: 11 commits, Mar 14, 2023
219 changes: 217 additions & 2 deletions python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -48,15 +48,32 @@ def test_check_grad_normal(self):

def test_check_grad_ingore_x(self):
self.check_grad(
['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")
['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"),
)

def test_check_grad_ingore_y(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')
['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'),
)


class TestElementwiseFP16Op(TestElementwiseOp):
Review comment (Contributor): Please rename this class; use TestElementwiseMaxFP16Op instead.

Reply (Contributor Author): done

def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -67,6 +84,16 @@ def setUp(self):
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.1, 1, []).astype("float16")
y = np.random.uniform(0.1, 1, []).astype("float16")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -77,6 +104,16 @@ def setUp(self):
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
y = np.random.uniform(0.1, 1, []).astype("float16")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -87,6 +124,16 @@ def setUp(self):
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.1, 1, []).astype("float16")
y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


@unittest.skipIf(
core.is_compiled_with_cuda()
and (
@@ -131,6 +178,53 @@ def test_check_grad_ingore_y(self):
self.check_grad(['X'], 'Out', no_grad_set=set('Y'))


class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
def setUp(self):
Review comment (Contributor): Many of these subclasses' setUp methods repeat code from the base class. Consider extracting the parts that differ into a separate function such as init_data, to reduce the duplication (see the sketch after this class).

Reply (Contributor Author): done

self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.1, 1, []).astype("float32")
y = np.random.uniform(0.1, 1, []).astype("float32")
self.inputs = {
'X': convert_float_to_uint16(x),
'Y': convert_float_to_uint16(y),
}
self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}

def test_check_grad_normal(self):
if hasattr(self, 'attrs'):
self.check_grad(
['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=False
)
else:
self.check_grad(
['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=True
)

def test_check_grad_ingore_x(self):
self.check_grad(
['Y'], 'Out', numeric_grad_delta=0.05, no_grad_set=set("X")
)

def test_check_grad_ingore_y(self):
self.check_grad(
['X'], 'Out', numeric_grad_delta=0.05, no_grad_set=set('Y')
)

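For reference, a minimal sketch of the refactor the reviewer suggests, assuming a hypothetical init_data hook (the names here are illustrative, not the final code in this PR):

```python
import numpy as np
import paddle

class TestElementwiseMaxFP16Op(TestElementwiseOp):
    def init_data(self):
        # Subclasses override only the data generation.
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
        self.y = self.x + sgn * np.random.uniform(
            0.1, 1, [13, 17]
        ).astype(np.float16)

    def setUp(self):
        # Shared wiring lives once in the base class.
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.init_data()
        self.inputs = {'X': self.x, 'Y': self.y}
        self.outputs = {'Out': np.maximum(self.x, self.y)}

class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseMaxFP16Op):
    def init_data(self):
        # Only the shapes change; everything else is inherited.
        self.x = np.random.uniform(0.1, 1, []).astype(np.float16)
        self.y = np.random.uniform(0.1, 1, []).astype(np.float16)
```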

class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
y = np.array([0.5]).astype("float32")
self.inputs = {
'X': convert_float_to_uint16(x),
'Y': convert_float_to_uint16(y),
}
self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
self.__class__.no_need_check_grad = True


@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast."
)
@@ -144,6 +238,16 @@ def setUp(self):
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxFP16Op_scalar(TestElementwiseMaxOp_scalar):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float16")
y = np.array([0.5]).astype("float16")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -155,6 +259,31 @@ def setUp(self):
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxFP16Op_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.random((100,)).astype("float16")
sgn = np.random.choice([-1, 1], (100,)).astype("float16")
y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float16")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.random((100,)).astype("float32")
sgn = np.random.choice([-1, 1], (100,)).astype("float32")
y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float32")
self.inputs = {
'X': convert_float_to_uint16(x),
'Y': convert_float_to_uint16(y),
}
self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}

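The bfloat16 cases above pass uint16 arrays to the operator because the test framework stores bf16 data as raw bit patterns. As a rough illustration of what a helper like convert_float_to_uint16 does (an illustrative reimplementation that truncates instead of rounding, not Paddle's exact code):

```python
import numpy as np

def float32_to_bf16_bits(x):
    """Keep the upper 16 bits of each float32: the bfloat16 bit layout."""
    x = np.ascontiguousarray(x, dtype=np.float32)
    # View the float32 bits as uint32 and drop the low 16 mantissa bits.
    return np.right_shift(x.view(np.uint32), 16).astype(np.uint16)

x = np.random.uniform(0.1, 1, (100,)).astype("float32")
bits = float32_to_bf16_bits(x)
print(bits.dtype, bits.shape)  # uint16 (100,)
```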

class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -174,6 +303,25 @@ def setUp(self):
}


class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float16
)
self.inputs = {'X': x, 'Y': y}

self.attrs = {'axis': 0}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
)
}

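In these broadcast cases, the legacy axis attribute pins Y's dimensions to a starting dimension of X, and the expected output reshapes Y to match: with axis=0, a (100,) Y behaves like shape (100, 1, 1) against a (100, 5, 2) X. A small NumPy check of that alignment (plain NumPy, mirroring the expected-output computation above):

```python
import numpy as np

x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
y = np.random.uniform(1, 2, (100,)).astype(np.float16)

# axis=0: y aligns with x's leading dimension, so it broadcasts
# as shape (100, 1, 1) across the remaining dimensions.
expected = np.maximum(x, y.reshape(100, 1, 1))
print(expected.shape)  # (100, 5, 2)
```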

class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -193,6 +341,25 @@ def setUp(self):
}


class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float16
)
self.inputs = {'X': x, 'Y': y}

self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
)
}


class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -211,6 +378,24 @@ def setUp(self):
}


class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
np.float16
)
self.inputs = {'X': x, 'Y': y}

self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
)
}


class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -230,6 +415,25 @@ def setUp(self):
}


class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
np.float16
)
self.inputs = {'X': x, 'Y': y}

self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(
self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
)
}


class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
@@ -242,5 +446,16 @@ def setUp(self):
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


if __name__ == '__main__':
unittest.main()