[CodeStyle] replace `assert np.allclose` with `np.testing.assert_allclose` and `assert np.array_equal` with `np.testing.assert_array_equal` (#55385)
zrr1999 authored Aug 1, 2023
1 parent 4df4b9f commit 744e1ea
Showing 82 changed files with 722 additions and 538 deletions.
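The pattern applied throughout is mechanical: bare `assert np.allclose(...)` / `assert np.array_equal(...)` statements become calls to the dedicated `np.testing` helpers. Below is a minimal sketch (the arrays are illustrative, not taken from the diff) of why the helpers are preferable: they are not stripped when Python runs with `-O`, and on failure they report the mismatch ratio plus the max absolute and relative differences instead of a bare AssertionError. One caveat worth knowing: the defaults differ (`np.allclose` uses rtol=1e-5, atol=1e-8; `assert_allclose` uses rtol=1e-7, atol=0), so a straight replacement tightens the check unless tolerances are passed explicitly.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 3.000001])

# Bare assert: silently skipped under `python -O`, and a failure
# reports nothing beyond "AssertionError".
assert np.allclose(a, b, rtol=1e-5)

# np.testing helper: always executes; a failure prints which
# elements mismatch and by how much.
np.testing.assert_allclose(a, b, rtol=1e-5)

# Exact element-wise comparison with the same detailed failure report.
np.testing.assert_array_equal(a, np.array([1.0, 2.0, 3.0]))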
4 changes: 2 additions & 2 deletions test/auto_parallel/random_control_unittest.py
@@ -86,7 +86,7 @@ def compare_mask_between_ranks(
         mask_tensor_remote = paddle.ones_like(mask_tensor_local)
         dy_broadcast_helper(mask_tensor_remote)
         if equal:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 mask_tensor_remote.numpy(), mask_tensor_local.numpy()
             )
         else:
@@ -205,7 +205,7 @@ def test_random_ctrl_with_recompute(self):
         for i in range(7):
             mask_fw = mask_np_list[i].astype("float32")
             mask_rc = mask_np_list[i + 7].astype("float32")
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 mask_fw,
                 mask_rc,
             )
2 changes: 1 addition & 1 deletion test/collective/fleet/dygraph_dist_save_load.py
@@ -208,7 +208,7 @@ def step_check(path1, path2):
     m1 = paddle.load(path1)
     m2 = paddle.load(path2)
     for v1, v2 in zip(m1, m2):
-        assert np.allclose(v1.numpy(), v2.numpy())
+        np.testing.assert_allclose(v1.numpy(), v2.numpy())
         print(f"value same: {v1.name}")


2 changes: 1 addition & 1 deletion test/collective/fleet/dygraph_save_for_auto_infer.py
@@ -267,7 +267,7 @@ def step_check(output_dir):
     m1 = np.load(p1).reshape(-1)
     m2 = np.load(p2).reshape(-1)
     try:
-        assert np.allclose(m1, m2, rtol=1e-5, atol=1e-6)
+        np.testing.assert_allclose(m1, m2, rtol=1e-5, atol=1e-6)
     except:
         diff = m1 - m2
         logger.error(f"max diff{diff.max()}, min diff: {diff.min()}")
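A note on the hunk above: `np.testing.assert_allclose` raises AssertionError on mismatch, so the surrounding bare `except:` still reaches its logging branch after the replacement. A self-contained sketch (values are illustrative):

import numpy as np

m1 = np.zeros(3)
m2 = np.array([0.0, 0.0, 0.5])
try:
    # 0.5 exceeds atol + rtol * |desired|, so this raises AssertionError.
    np.testing.assert_allclose(m1, m2, rtol=1e-5, atol=1e-6)
except AssertionError:
    diff = m1 - m2
    print(f"max diff: {diff.max()}, min diff: {diff.min()}")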
2 changes: 1 addition & 1 deletion test/collective/fleet/fused_attention_pass_with_mp.py
@@ -234,7 +234,7 @@ def get_rst(self, use_pass=False):
     def test_pass(self):
         fused_rst = self.get_rst(use_pass=True)
         non_fused_rst = self.get_rst()
-        assert np.allclose(fused_rst, non_fused_rst, atol=1e-5)
+        np.testing.assert_allclose(fused_rst, non_fused_rst, atol=1e-5)


 if __name__ == "__main__":
16 changes: 8 additions & 8 deletions test/collective/fleet/hybrid_parallel_communicate_group.py
@@ -58,28 +58,28 @@ def test_all(self):
             sync_op=True,
         )
         if dp_rank == 0:
-            assert np.array_equal(result, self.tensor2)
+            np.testing.assert_array_equal(result, self.tensor2)
         elif dp_rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test scatter api ok")

         paddle.distributed.broadcast(result, src=1, group=dp_gp, sync_op=True)
-        assert np.array_equal(result, self.tensor1)
+        np.testing.assert_array_equal(result, self.tensor1)
         print("test broadcast api ok")

         paddle.distributed.reduce(
             result, dst=dp_src_rank, group=dp_gp, sync_op=True
         )
         if dp_rank == 0:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 result, paddle.add(self.tensor1, self.tensor1)
             )
         elif dp_rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test reduce api ok")

         paddle.distributed.all_reduce(result, sync_op=True)
-        assert np.array_equal(
+        np.testing.assert_array_equal(
             result,
             paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1),
         )
@@ -93,8 +93,8 @@ def test_all(self):
         paddle.distributed.all_gather(
             result, self.tensor1, group=dp_gp, sync_op=True
         )
-        assert np.array_equal(result[0], self.tensor1)
-        assert np.array_equal(result[1], self.tensor1)
+        np.testing.assert_array_equal(result[0], self.tensor1)
+        np.testing.assert_array_equal(result[1], self.tensor1)
         print("test all_gather api ok")

         paddle.distributed.barrier(group=dp_gp)
16 changes: 8 additions & 8 deletions test/collective/fleet/new_group.py
@@ -36,26 +36,26 @@ def test_all(self):
             result, [self.tensor2, self.tensor1], src=0, group=gp, sync_op=True
         )
         if gp.rank == 0:
-            assert np.array_equal(result, self.tensor2)
+            np.testing.assert_array_equal(result, self.tensor2)
         elif gp.rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test scatter api ok")

         paddle.distributed.broadcast(result, src=1, group=gp, sync_op=True)
-        assert np.array_equal(result, self.tensor1)
+        np.testing.assert_array_equal(result, self.tensor1)
         print("test broadcast api ok")

         paddle.distributed.reduce(result, dst=0, group=gp, sync_op=True)
         if gp.rank == 0:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 result, paddle.add(self.tensor1, self.tensor1)
             )
         elif gp.rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test reduce api ok")

         paddle.distributed.all_reduce(result, sync_op=True)
-        assert np.array_equal(
+        np.testing.assert_array_equal(
             result,
             paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1),
         )
@@ -69,8 +69,8 @@ def test_all(self):
         paddle.distributed.all_gather(
             result, self.tensor1, group=gp, sync_op=True
         )
-        assert np.array_equal(result[0], self.tensor1)
-        assert np.array_equal(result[1], self.tensor1)
+        np.testing.assert_array_equal(result[0], self.tensor1)
+        np.testing.assert_array_equal(result[1], self.tensor1)
         print("test all_gather api ok")

         paddle.distributed.barrier(group=gp)
@@ -201,8 +201,8 @@ def scale(self):
         data = paddle.rand([10, 1024])
         scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
         scaled_data = scaler.scale(data)
-        self.assertEqual(
-            np.array_equal(scaled_data.numpy(), data.numpy() * 1024), True
+        np.testing.assert_array_equal(
+            scaled_data.numpy(), data.numpy() * 1024
         )

     def test_scale(self):
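The hunk above rewrites a different idiom: a boolean squeezed through `self.assertEqual(..., True)`, whose failure message is only "False != True". A hedged unittest-style sketch of the same rewrite (the class and data are illustrative, not Paddle's):

import unittest

import numpy as np

class ScaleSketch(unittest.TestCase):
    def test_scale(self):
        data = np.random.rand(10, 1024).astype("float32")
        scaled = data * 1024
        # Before: a failure reports nothing but "False != True".
        self.assertEqual(np.array_equal(scaled, data * 1024), True)
        # After: a failure pinpoints the mismatching elements.
        np.testing.assert_array_equal(scaled, data * 1024)

if __name__ == "__main__":
    unittest.main()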
2 changes: 1 addition & 1 deletion test/collective/multinode/dygraph_hybrid_dp.py
@@ -39,7 +39,7 @@ def check_pass(self, *args, **kwargs):
         paddle.distributed.collective.all_reduce(data_part)
         data_reduced = data_part
         data_sumed = np.sum(data, axis=0)
-        assert np.allclose(
+        np.testing.assert_allclose(
             data_sumed, data_reduced.numpy(), rtol=1e-8, atol=1e-8
         )

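For reference, `assert_allclose` passes when `abs(actual - desired) <= atol + rtol * abs(desired)` element-wise, so the `rtol=1e-8, atol=1e-8` used above amounts to a near-exact comparison for values of order one. A tiny illustration (the numbers are made up):

import numpy as np

# 5e-9 <= 1e-8 + 1e-8 * 1.0, so this passes; an error of 3e-8 would fail.
np.testing.assert_allclose(1.0 + 5e-9, 1.0, rtol=1e-8, atol=1e-8)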
4 changes: 3 additions & 1 deletion test/collective/multinode/dygraph_hybrid_dpppmp.py
@@ -197,7 +197,9 @@ def check_pass(self, *args, **kwargs):

         loss_base_arr.append(loss_base.numpy())
         loss_hybrid_arr.append(loss.numpy())
-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5
+        )


 if __name__ == "__main__":
4 changes: 3 additions & 1 deletion test/collective/multinode/dygraph_hybrid_fp16.py
@@ -210,7 +210,9 @@ def check_pass(self, *args, **kwargs):

         loss_base_arr.append(loss_base.numpy())
         loss_hybrid_arr.append(loss)
-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-3, atol=1e-3)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-3, atol=1e-3
+        )


 if __name__ == "__main__":
4 changes: 3 additions & 1 deletion test/collective/multinode/dygraph_hybrid_recompute.py
@@ -186,7 +186,9 @@ def check_pass(self, *args, **kwargs):

         loss_base_arr.append(loss_base.numpy())
         loss_hybrid_arr.append(loss)
-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5
+        )


 if __name__ == "__main__":
24 changes: 12 additions & 12 deletions test/collective/process_group_gloo.py
@@ -76,11 +76,11 @@ def test_create_process_group_gloo(self):
         if rank == 0:
             task = pg.allreduce(tensor_x, core.ReduceOp.MAX)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = pg.allreduce(tensor_y, core.ReduceOp.MAX)
             task.wait()
-            assert np.array_equal(tensor_y, max_result)
+            np.testing.assert_array_equal(tensor_y, max_result)

         print("test allreduce max api ok")

@@ -95,10 +95,10 @@ def test_create_process_group_gloo(self):
         broadcast_result = paddle.assign(tensor_x)
         if rank == 0:
             task = pg.broadcast(tensor_x, 0)
-            assert np.array_equal(broadcast_result, tensor_x)
+            np.testing.assert_array_equal(broadcast_result, tensor_x)
         else:
             task = pg.broadcast(tensor_y, 0)
-            assert np.array_equal(broadcast_result, tensor_y)
+            np.testing.assert_array_equal(broadcast_result, tensor_y)
         print("test broadcast api ok")

         # test send_recv
@@ -116,11 +116,11 @@ def test_create_process_group_gloo(self):
             task = pg.send(tensor_x, pg.size() - 1, True)
         elif pg.rank() == pg.size() - 1:
             task = pg.recv(tensor_y_1, 0, True)
-            assert np.array_equal(send_recv_result_1, tensor_y_1)
+            np.testing.assert_array_equal(send_recv_result_1, tensor_y_1)

         if pg.rank() == 0:
             task = pg.recv(tensor_x, pg.size() - 1, True)
-            assert np.array_equal(send_recv_result_2, tensor_x)
+            np.testing.assert_array_equal(send_recv_result_2, tensor_x)
         elif pg.rank() == pg.size() - 1:
             task = pg.send(tensor_y_2, 0, True)
         print("test send_recv api ok")
@@ -159,8 +159,8 @@ def test_create_process_group_gloo(self):
         out_2 = paddle.slice(
             tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
         )
-        assert np.array_equal(tensor_x, out_1)
-        assert np.array_equal(tensor_y, out_2)
+        np.testing.assert_array_equal(tensor_x, out_1)
+        np.testing.assert_array_equal(tensor_y, out_2)
         print("test allgather api ok\n")

         # test Reduce
@@ -178,7 +178,7 @@ def test_create_process_group_gloo(self):
         task = pg.reduce(tensor_y, 0)
         task.wait()
         if pg.rank() == 0:
-            assert np.array_equal(tensor_x, sum_result)
+            np.testing.assert_array_equal(tensor_x, sum_result)
         print("test reduce sum api ok\n")

         # test Scatter
@@ -199,9 +199,9 @@ def test_create_process_group_gloo(self):
         out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
         out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2])
         if pg.rank() == 0:
-            assert np.array_equal(tensor_y, out1)
+            np.testing.assert_array_equal(tensor_y, out1)
         else:
-            assert np.array_equal(tensor_y, out2)
+            np.testing.assert_array_equal(tensor_y, out2)
         print("test scatter api ok\n")

         # test Gather
@@ -219,7 +219,7 @@ def test_gather(root):
             if pg.rank() == root:
                 task = pg.gather(tensor_y[root], tensor_x, root, True)
                 task.wait()
-                assert np.array_equal(tensor_x, tensor_y)
+                np.testing.assert_array_equal(tensor_x, tensor_y)
             else:
                 task = pg.gather(tensor_y[pg.rank()], tensor_x, root, True)
                 task.wait()