diff --git a/test/auto_parallel/random_control_unittest.py b/test/auto_parallel/random_control_unittest.py
index 6fcee26d21201..addc384699885 100644
--- a/test/auto_parallel/random_control_unittest.py
+++ b/test/auto_parallel/random_control_unittest.py
@@ -86,7 +86,7 @@ def compare_mask_between_ranks(
         mask_tensor_remote = paddle.ones_like(mask_tensor_local)
         dy_broadcast_helper(mask_tensor_remote)
         if equal:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 mask_tensor_remote.numpy(), mask_tensor_local.numpy()
             )
         else:
@@ -205,7 +205,7 @@ def test_random_ctrl_with_recompute(self):
         for i in range(7):
             mask_fw = mask_np_list[i].astype("float32")
             mask_rc = mask_np_list[i + 7].astype("float32")
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 mask_fw,
                 mask_rc,
             )
diff --git a/test/collective/fleet/dygraph_dist_save_load.py b/test/collective/fleet/dygraph_dist_save_load.py
index 0d4b50a9eebc4..b2e6216f54612 100644
--- a/test/collective/fleet/dygraph_dist_save_load.py
+++ b/test/collective/fleet/dygraph_dist_save_load.py
@@ -208,7 +208,7 @@ def step_check(path1, path2):
     m1 = paddle.load(path1)
     m2 = paddle.load(path2)
     for v1, v2 in zip(m1, m2):
-        assert np.allclose(v1.numpy(), v2.numpy())
+        np.testing.assert_allclose(v1.numpy(), v2.numpy())
         print(f"value same: {v1.name}")


diff --git a/test/collective/fleet/dygraph_save_for_auto_infer.py b/test/collective/fleet/dygraph_save_for_auto_infer.py
index 724267fbaf736..f184659197d52 100644
--- a/test/collective/fleet/dygraph_save_for_auto_infer.py
+++ b/test/collective/fleet/dygraph_save_for_auto_infer.py
@@ -267,7 +267,7 @@ def step_check(output_dir):
         m1 = np.load(p1).reshape(-1)
         m2 = np.load(p2).reshape(-1)
         try:
-            assert np.allclose(m1, m2, rtol=1e-5, atol=1e-6)
+            np.testing.assert_allclose(m1, m2, rtol=1e-5, atol=1e-6)
         except:
             diff = m1 - m2
             logger.error(f"max diff{diff.max()}, min diff: {diff.min()}")
diff --git a/test/collective/fleet/fused_attention_pass_with_mp.py b/test/collective/fleet/fused_attention_pass_with_mp.py
index c8a1673e6630d..2f1e657cfc8be 100644
--- a/test/collective/fleet/fused_attention_pass_with_mp.py
+++ b/test/collective/fleet/fused_attention_pass_with_mp.py
@@ -234,7 +234,7 @@ def get_rst(self, use_pass=False):
     def test_pass(self):
         fused_rst = self.get_rst(use_pass=True)
         non_fused_rst = self.get_rst()
-        assert np.allclose(fused_rst, non_fused_rst, atol=1e-5)
+        np.testing.assert_allclose(fused_rst, non_fused_rst, atol=1e-5)


 if __name__ == "__main__":
diff --git a/test/collective/fleet/hybrid_parallel_communicate_group.py b/test/collective/fleet/hybrid_parallel_communicate_group.py
index e89e807ae4309..1b9febf04f874 100644
--- a/test/collective/fleet/hybrid_parallel_communicate_group.py
+++ b/test/collective/fleet/hybrid_parallel_communicate_group.py
@@ -58,28 +58,28 @@ def test_all(self):
             sync_op=True,
         )
         if dp_rank == 0:
-            assert np.array_equal(result, self.tensor2)
+            np.testing.assert_array_equal(result, self.tensor2)
         elif dp_rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test scatter api ok")

         paddle.distributed.broadcast(result, src=1, group=dp_gp, sync_op=True)
-        assert np.array_equal(result, self.tensor1)
+        np.testing.assert_array_equal(result, self.tensor1)
         print("test broadcast api ok")

         paddle.distributed.reduce(
             result, dst=dp_src_rank, group=dp_gp, sync_op=True
         )
         if dp_rank == 0:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 result, paddle.add(self.tensor1, self.tensor1)
             )
         elif dp_rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test reduce api ok")

         paddle.distributed.all_reduce(result, sync_op=True)
-        assert np.array_equal(
+        np.testing.assert_array_equal(
             result,
             paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1),
         )
@@ -93,8 +93,8 @@ def test_all(self):
         paddle.distributed.all_gather(
             result, self.tensor1, group=dp_gp, sync_op=True
         )
-        assert np.array_equal(result[0], self.tensor1)
-        assert np.array_equal(result[1], self.tensor1)
+        np.testing.assert_array_equal(result[0], self.tensor1)
+        np.testing.assert_array_equal(result[1], self.tensor1)
         print("test all_gather api ok")

         paddle.distributed.barrier(group=dp_gp)
diff --git a/test/collective/fleet/new_group.py b/test/collective/fleet/new_group.py
index 1945bae835571..deeea06665019 100644
--- a/test/collective/fleet/new_group.py
+++ b/test/collective/fleet/new_group.py
@@ -36,26 +36,26 @@ def test_all(self):
             result, [self.tensor2, self.tensor1], src=0, group=gp, sync_op=True
         )
         if gp.rank == 0:
-            assert np.array_equal(result, self.tensor2)
+            np.testing.assert_array_equal(result, self.tensor2)
         elif gp.rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test scatter api ok")

         paddle.distributed.broadcast(result, src=1, group=gp, sync_op=True)
-        assert np.array_equal(result, self.tensor1)
+        np.testing.assert_array_equal(result, self.tensor1)
         print("test broadcast api ok")

         paddle.distributed.reduce(result, dst=0, group=gp, sync_op=True)
         if gp.rank == 0:
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 result, paddle.add(self.tensor1, self.tensor1)
             )
         elif gp.rank == 1:
-            assert np.array_equal(result, self.tensor1)
+            np.testing.assert_array_equal(result, self.tensor1)
         print("test reduce api ok")

         paddle.distributed.all_reduce(result, sync_op=True)
-        assert np.array_equal(
+        np.testing.assert_array_equal(
             result,
             paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1),
         )
@@ -69,8 +69,8 @@ def test_all(self):
         paddle.distributed.all_gather(
             result, self.tensor1, group=gp, sync_op=True
         )
-        assert np.array_equal(result[0], self.tensor1)
-        assert np.array_equal(result[1], self.tensor1)
+        np.testing.assert_array_equal(result[0], self.tensor1)
+        np.testing.assert_array_equal(result[1], self.tensor1)
         print("test all_gather api ok")

         paddle.distributed.barrier(group=gp)
diff --git a/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py
index 5de19dfb4113b..6f71afb296efb 100644
--- a/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py
+++ b/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py
@@ -201,8 +201,8 @@ def scale(self):
         data = paddle.rand([10, 1024])
         scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
         scaled_data = scaler.scale(data)
-        self.assertEqual(
-            np.array_equal(scaled_data.numpy(), data.numpy() * 1024), True
+        np.testing.assert_array_equal(
+            scaled_data.numpy(), data.numpy() * 1024
         )

     def test_scale(self):
diff --git a/test/collective/multinode/dygraph_hybrid_dp.py b/test/collective/multinode/dygraph_hybrid_dp.py
index 3d2f8f9e5d3b3..d31177d9813c0 100644
--- a/test/collective/multinode/dygraph_hybrid_dp.py
+++ b/test/collective/multinode/dygraph_hybrid_dp.py
@@ -39,7 +39,7 @@ def check_pass(self, *args, **kwargs):
         paddle.distributed.collective.all_reduce(data_part)
         data_reduced = data_part
         data_sumed = np.sum(data, axis=0)
-        assert np.allclose(
+        np.testing.assert_allclose(
             data_sumed, data_reduced.numpy(), rtol=1e-8, atol=1e-8
         )

diff --git a/test/collective/multinode/dygraph_hybrid_dpppmp.py b/test/collective/multinode/dygraph_hybrid_dpppmp.py
index 55a130e3f6ad7..4a52d305090b8 100644
--- a/test/collective/multinode/dygraph_hybrid_dpppmp.py
+++ b/test/collective/multinode/dygraph_hybrid_dpppmp.py
@@ -197,7 +197,9 @@ def check_pass(self, *args, **kwargs):
             loss_base_arr.append(loss_base.numpy())
             loss_hybrid_arr.append(loss.numpy())

-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5
+        )


 if __name__ == "__main__":
diff --git a/test/collective/multinode/dygraph_hybrid_fp16.py b/test/collective/multinode/dygraph_hybrid_fp16.py
index 34319a6c018c6..6bd3e4390a190 100644
--- a/test/collective/multinode/dygraph_hybrid_fp16.py
+++ b/test/collective/multinode/dygraph_hybrid_fp16.py
@@ -210,7 +210,9 @@ def check_pass(self, *args, **kwargs):
             loss_base_arr.append(loss_base.numpy())
             loss_hybrid_arr.append(loss)

-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-3, atol=1e-3)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-3, atol=1e-3
+        )


 if __name__ == "__main__":
diff --git a/test/collective/multinode/dygraph_hybrid_recompute.py b/test/collective/multinode/dygraph_hybrid_recompute.py
index 1902b716296d8..6de5e336acf3f 100644
--- a/test/collective/multinode/dygraph_hybrid_recompute.py
+++ b/test/collective/multinode/dygraph_hybrid_recompute.py
@@ -186,7 +186,9 @@ def check_pass(self, *args, **kwargs):
             loss_base_arr.append(loss_base.numpy())
             loss_hybrid_arr.append(loss)

-        assert np.allclose(loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5)
+        np.testing.assert_allclose(
+            loss_base_arr, loss_hybrid_arr, rtol=1e-5, atol=1e-5
+        )


 if __name__ == "__main__":
diff --git a/test/collective/process_group_gloo.py b/test/collective/process_group_gloo.py
index 20dcae5928ad9..b6ae187cc5ffa 100644
--- a/test/collective/process_group_gloo.py
+++ b/test/collective/process_group_gloo.py
@@ -76,11 +76,11 @@ def test_create_process_group_gloo(self):
         if rank == 0:
             task = pg.allreduce(tensor_x, core.ReduceOp.MAX)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = pg.allreduce(tensor_y, core.ReduceOp.MAX)
             task.wait()
-            assert np.array_equal(tensor_y, max_result)
+            np.testing.assert_array_equal(tensor_y, max_result)

         print("test allreduce max api ok")

@@ -95,10 +95,10 @@ def test_create_process_group_gloo(self):
         broadcast_result = paddle.assign(tensor_x)
         if rank == 0:
             task = pg.broadcast(tensor_x, 0)
-            assert np.array_equal(broadcast_result, tensor_x)
+            np.testing.assert_array_equal(broadcast_result, tensor_x)
         else:
             task = pg.broadcast(tensor_y, 0)
-            assert np.array_equal(broadcast_result, tensor_y)
+            np.testing.assert_array_equal(broadcast_result, tensor_y)
         print("test broadcast api ok")

         # test send_recv
@@ -116,11 +116,11 @@ def test_create_process_group_gloo(self):
         if pg.rank() == 0:
             task = pg.send(tensor_x, pg.size() - 1, True)
         elif pg.rank() == pg.size() - 1:
             task = pg.recv(tensor_y_1, 0, True)
-            assert np.array_equal(send_recv_result_1, tensor_y_1)
+            np.testing.assert_array_equal(send_recv_result_1, tensor_y_1)
         if pg.rank() == 0:
             task = pg.recv(tensor_x, pg.size() - 1, True)
-            assert np.array_equal(send_recv_result_2, tensor_x)
+            np.testing.assert_array_equal(send_recv_result_2, tensor_x)
         elif pg.rank() == pg.size() - 1:
             task = pg.send(tensor_y_2, 0, True)
print("test send_recv api ok") @@ -159,8 +159,8 @@ def test_create_process_group_gloo(self): out_2 = paddle.slice( tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] ) - assert np.array_equal(tensor_x, out_1) - assert np.array_equal(tensor_y, out_2) + np.testing.assert_array_equal(tensor_x, out_1) + np.testing.assert_array_equal(tensor_y, out_2) print("test allgather api ok\n") # test Reduce @@ -178,7 +178,7 @@ def test_create_process_group_gloo(self): task = pg.reduce(tensor_y, 0) task.wait() if pg.rank() == 0: - assert np.array_equal(tensor_x, sum_result) + np.testing.assert_array_equal(tensor_x, sum_result) print("test reduce sum api ok\n") # test Scatter @@ -199,9 +199,9 @@ def test_create_process_group_gloo(self): out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]]) out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2]) if pg.rank() == 0: - assert np.array_equal(tensor_y, out1) + np.testing.assert_array_equal(tensor_y, out1) else: - assert np.array_equal(tensor_y, out2) + np.testing.assert_array_equal(tensor_y, out2) print("test scatter api ok\n") # test Gather @@ -219,7 +219,7 @@ def test_gather(root): if pg.rank() == root: task = pg.gather(tensor_y[root], tensor_x, root, True) task.wait() - assert np.array_equal(tensor_x, tensor_y) + np.testing.assert_array_equal(tensor_x, tensor_y) else: task = pg.gather(tensor_y[pg.rank()], tensor_x, root, True) task.wait() diff --git a/test/collective/process_group_mpi.py b/test/collective/process_group_mpi.py index f2fc9c498b4e8..b5d9fd4b74377 100644 --- a/test/collective/process_group_mpi.py +++ b/test/collective/process_group_mpi.py @@ -69,10 +69,10 @@ def test_allreduce_sum(pg, shape, dtype): sum_result = tensor_x + tensor_y if pg.rank() == 0: task = dist.all_reduce(tensor_x) - assert np.array_equal(tensor_x, sum_result) + np.testing.assert_array_equal(tensor_x, sum_result) else: task = dist.all_reduce(tensor_y) - assert np.array_equal(tensor_y, sum_result) + np.testing.assert_array_equal(tensor_y, sum_result) print("test allreduce sum api ok") @@ -91,13 +91,13 @@ def test_allreduce_max(pg, shape, dtype): tensor_x, dist.ReduceOp.MAX, use_calc_stream=False ) task.wait() - assert np.array_equal(tensor_x, max_result) + np.testing.assert_array_equal(tensor_x, max_result) else: task = dist.all_reduce( tensor_y, dist.ReduceOp.MAX, use_calc_stream=False ) task.wait() - assert np.array_equal(tensor_y, max_result) + np.testing.assert_array_equal(tensor_y, max_result) print("test allreduce max api ok") @@ -116,13 +116,13 @@ def test_allreduce_min(pg, shape, dtype): tensor_x, dist.ReduceOp.MIN, use_calc_stream=False ) task.wait() - assert np.array_equal(tensor_x, min_result) + np.testing.assert_array_equal(tensor_x, min_result) else: task = dist.all_reduce( tensor_y, dist.ReduceOp.MIN, use_calc_stream=False ) task.wait() - assert np.array_equal(tensor_y, min_result) + np.testing.assert_array_equal(tensor_y, min_result) print("test allreduce min api ok") @@ -141,13 +141,13 @@ def test_allreduce_prod(pg, shape, dtype): tensor_x, dist.ReduceOp.PROD, use_calc_stream=False ) task.wait() - assert np.array_equal(tensor_x, prod_result) + np.testing.assert_array_equal(tensor_x, prod_result) else: task = dist.all_reduce( tensor_y, dist.ReduceOp.PROD, use_calc_stream=False ) task.wait() - assert np.array_equal(tensor_y, prod_result) + np.testing.assert_array_equal(tensor_y, prod_result) print("test allreduce prod api ok") @@ -164,10 +164,10 @@ def test_broadcast(pg, shape, dtype): task = dist.broadcast(tensor_x, 0, use_calc_stream=False) 
         task.synchronize()
         assert task.is_completed()
-        assert np.array_equal(broadcast_result, tensor_x)
+        np.testing.assert_array_equal(broadcast_result, tensor_x)
     else:
         task = dist.broadcast(tensor_y, 0)
-        assert np.array_equal(broadcast_result, tensor_y)
+        np.testing.assert_array_equal(broadcast_result, tensor_y)

     print("test broadcast api ok")

@@ -205,8 +205,8 @@ def test_allgather(pg, shape, dtype):
     tensor_out = paddle.concat(tensor_out_list)
     out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
     out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], [out_shape[0]])
-    assert np.array_equal(tensor_x, out_1)
-    assert np.array_equal(tensor_y, out_2)
+    np.testing.assert_array_equal(tensor_x, out_1)
+    np.testing.assert_array_equal(tensor_y, out_2)
     print("test allgather api ok\n")

     if pg.rank() == 0:
@@ -219,8 +219,8 @@ def test_allgather(pg, shape, dtype):
     tensor_out = paddle.concat(tensor_out_list)
     out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
     out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], [out_shape[0]])
-    assert np.array_equal(tensor_x, out_1)
-    assert np.array_equal(tensor_y, out_2)
+    np.testing.assert_array_equal(tensor_x, out_1)
+    np.testing.assert_array_equal(tensor_y, out_2)
     print("test allgather api2 ok\n")

@@ -249,9 +249,9 @@ def test_all2all(pg, shape, dtype):
     out1_2 = paddle.slice(tensor_out1, [0], [shape[0] // 2], [shape[0]])
     out2_1 = paddle.slice(tensor_out2, [0], [0], [shape[0] // 2])
     if pg.rank() == 0:
-        assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
+        np.testing.assert_array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
     else:
-        assert np.array_equal(out2_1, raw_tensor_x_2)
+        np.testing.assert_array_equal(out2_1, raw_tensor_x_2)
     print("test alltoall api ok\n")

     x = np.random.random(shape).astype(dtype)
@@ -277,9 +277,9 @@ def test_all2all(pg, shape, dtype):
     out1_2 = paddle.slice(tensor_out1, [0], [shape[0] // 2], [shape[0]])
     out2_1 = paddle.slice(tensor_out2, [0], [0], [shape[0] // 2])
     if pg.rank() == 0:
-        assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
+        np.testing.assert_array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
     else:
-        assert np.array_equal(out2_1, raw_tensor_x_2)
+        np.testing.assert_array_equal(out2_1, raw_tensor_x_2)
     print("test alltoall api2 ok\n")

@@ -297,7 +297,7 @@ def test_reduce_sum(pg, shape, dtype):
     task = dist.reduce(tensor_y, 0, use_calc_stream=False)
     task.wait()
     if pg.rank() == 0:
-        assert np.array_equal(tensor_x, sum_result)
+        np.testing.assert_array_equal(tensor_x, sum_result)
     print("test reduce sum api ok\n")

@@ -316,7 +316,7 @@ def test_reduce_max(pg, shape, dtype):
             tensor_x, 0, dist.ReduceOp.MAX, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, max_result)
+        np.testing.assert_array_equal(tensor_x, max_result)
     else:
         task = dist.reduce(
             tensor_y, 0, dist.ReduceOp.MAX, use_calc_stream=False
@@ -340,7 +340,7 @@ def test_reduce_min(pg, shape, dtype):
             tensor_x, 0, dist.ReduceOp.MIN, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, min_result)
+        np.testing.assert_array_equal(tensor_x, min_result)
     else:
         task = dist.reduce(
             tensor_y, 0, dist.ReduceOp.MIN, use_calc_stream=False
@@ -364,7 +364,7 @@ def test_reduce_prod(pg, shape, dtype):
             tensor_x, 0, dist.ReduceOp.PROD, use_calc_stream=False
         )
         task.wait()
-        assert np.array_equal(tensor_x, prod_result)
+        np.testing.assert_array_equal(tensor_x, prod_result)
     else:
         task = dist.reduce(
             tensor_y, 0, dist.ReduceOp.PROD, use_calc_stream=False
@@ -391,9 +391,9 @@ def test_scatter(pg, shape, dtype):
     out1 = paddle.slice(tensor_x, [0], [0], [shape[0]])
     out2 = paddle.slice(tensor_x, [0], [shape[0]], [shape[0] * 2])
     if pg.rank() == 0:
-        assert np.array_equal(tensor_y, out1)
+        np.testing.assert_array_equal(tensor_y, out1)
    else:
-        assert np.array_equal(tensor_y, out2)
+        np.testing.assert_array_equal(tensor_y, out2)
     print("test scatter api ok\n")

@@ -411,7 +411,7 @@ def test_send_recv(pg, sub_group, shape, dtype):
     elif pg.rank() == 1:
         task = dist.recv(tensor_y, 0, group=sub_group, use_calc_stream=False)
         task.wait()
-        assert np.array_equal(tensor_y, tensor_x)
+        np.testing.assert_array_equal(tensor_y, tensor_x)

     print("test send api ok")

@@ -427,7 +427,7 @@ def test_send_recv(pg, sub_group, shape, dtype):
     if pg.rank() == 0:
         task = dist.send(tensor_x, 1, group=sub_group, use_calc_stream=True)
     elif pg.rank() == 1:
         task = dist.recv(tensor_y, 0, group=sub_group, use_calc_stream=True)
-        assert np.array_equal(tensor_y, tensor_x)
+        np.testing.assert_array_equal(tensor_y, tensor_x)

     print("test send api ok")
diff --git a/test/collective/process_group_nccl.py b/test/collective/process_group_nccl.py
index 713e0a01b4abb..67815eab2bc83 100644
--- a/test/collective/process_group_nccl.py
+++ b/test/collective/process_group_nccl.py
@@ -64,10 +64,10 @@ def test_create_process_group_nccl(self):
         sum_result = tensor_x + tensor_y
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x)
-            assert np.array_equal(tensor_x, sum_result)
+            np.testing.assert_array_equal(tensor_x, sum_result)
         else:
             task = dist.all_reduce(tensor_y)
-            assert np.array_equal(tensor_y, sum_result)
+            np.testing.assert_array_equal(tensor_y, sum_result)

         print("test allreduce sum api ok")

@@ -82,10 +82,10 @@ def test_create_process_group_nccl(self):
         sum_result = tensor_x + tensor_y
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x)
-            assert np.array_equal(tensor_x, sum_result)
+            np.testing.assert_array_equal(tensor_x, sum_result)
         else:
             task = dist.all_reduce(tensor_y)
-            assert np.array_equal(tensor_y, sum_result)
+            np.testing.assert_array_equal(tensor_y, sum_result)

         print("test allreduce sum api with = [] ok")

@@ -102,11 +102,11 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, max_result)
+            np.testing.assert_array_equal(tensor_y, max_result)

         print("test allreduce max api ok")

@@ -123,11 +123,11 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, max_result)
+            np.testing.assert_array_equal(tensor_y, max_result)

         print("test allreduce max api with shape = [] ok")

@@ -144,11 +144,11 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, min_result)
+            np.testing.assert_array_equal(tensor_x, min_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, min_result)
+            np.testing.assert_array_equal(tensor_y, min_result)

         print("test allreduce min api ok")

@@ -165,11 +165,11 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, min_result)
+            np.testing.assert_array_equal(tensor_x, min_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, min_result)
+            np.testing.assert_array_equal(tensor_y, min_result)

         print("test allreduce min api with shape [] ok")

@@ -186,11 +186,11 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, prod_result)
+            np.testing.assert_array_equal(tensor_x, prod_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, prod_result)
+            np.testing.assert_array_equal(tensor_y, prod_result)

         print("test allreduce prod api ok")

@@ -207,11 +207,11 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.all_reduce(tensor_x, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, prod_result)
+            np.testing.assert_array_equal(tensor_x, prod_result)
         else:
             task = dist.all_reduce(tensor_y, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, prod_result)
+            np.testing.assert_array_equal(tensor_y, prod_result)

         print("test allreduce prod api with shape = [] ok")

@@ -229,11 +229,11 @@ def test_create_process_group_nccl(self):
             task.synchronize()
             paddle.device.cuda.synchronize()
             assert task.is_completed()
-            assert np.array_equal(broadcast_result, tensor_x)
+            np.testing.assert_array_equal(broadcast_result, tensor_x)
         else:
             task = dist.broadcast(tensor_y, 0)
             paddle.device.cuda.synchronize()
-            assert np.array_equal(broadcast_result, tensor_y)
+            np.testing.assert_array_equal(broadcast_result, tensor_y)

         print("test broadcast api ok")

@@ -251,11 +251,11 @@ def test_create_process_group_nccl(self):
             task.synchronize()
             paddle.device.cuda.synchronize()
             assert task.is_completed()
-            assert np.array_equal(broadcast_result, tensor_x)
+            np.testing.assert_array_equal(broadcast_result, tensor_x)
         else:
             task = dist.broadcast(tensor_y, 0)
             paddle.device.cuda.synchronize()
-            assert np.array_equal(broadcast_result, tensor_y)
+            np.testing.assert_array_equal(broadcast_result, tensor_y)
             assert tensor_y.shape == []

         print("test broadcast api with shape=[] ok")

@@ -298,8 +298,8 @@ def test_create_process_group_nccl(self):
         out_2 = paddle.slice(
             tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
         )
-        assert np.array_equal(tensor_x, out_1)
-        assert np.array_equal(tensor_y, out_2)
+        np.testing.assert_array_equal(tensor_x, out_1)
+        np.testing.assert_array_equal(tensor_y, out_2)
         print("test allgather api ok\n")

         if pg.rank() == 0:
@@ -316,8 +316,8 @@ def test_create_process_group_nccl(self):
         out_2 = paddle.slice(
             tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
         )
-        assert np.array_equal(tensor_x, out_1)
-        assert np.array_equal(tensor_y, out_2)
+        np.testing.assert_array_equal(tensor_x, out_1)
+        np.testing.assert_array_equal(tensor_y, out_2)
         print("test allgather api2 ok\n")

         # test allgather with shape = []
@@ -337,8 +337,8 @@ def test_create_process_group_nccl(self):
         paddle.device.cuda.synchronize()
         out_1 = tensor_out_list[0]
         out_2 = tensor_out_list[1]
-        assert np.array_equal(tensor_x, out_1)
-        assert np.array_equal(tensor_y, out_2)
+        np.testing.assert_array_equal(tensor_x, out_1)
+        np.testing.assert_array_equal(tensor_y, out_2)
         print("test allgather api with shape [] ok\n")

         # test alltoall
@@ -371,9 +371,11 @@ def test_create_process_group_nccl(self):
         )
         out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2])
         if pg.rank() == 0:
-            assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
+            np.testing.assert_array_equal(
+                out1_2.numpy(), raw_tensor_y_1.numpy()
+            )
         else:
-            assert np.array_equal(out2_1, raw_tensor_x_2)
+            np.testing.assert_array_equal(out2_1, raw_tensor_x_2)
         print("test alltoall api ok\n")

         x = np.random.random(self.shape).astype(self.dtype)
@@ -404,9 +406,11 @@ def test_create_process_group_nccl(self):
         )
         out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2])
         if pg.rank() == 0:
-            assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
+            np.testing.assert_array_equal(
+                out1_2.numpy(), raw_tensor_y_1.numpy()
+            )
         else:
-            assert np.array_equal(out2_1, raw_tensor_x_2)
+            np.testing.assert_array_equal(out2_1, raw_tensor_x_2)
         print("test alltoall api2 ok\n")

         # test Reduce
@@ -425,7 +429,7 @@ def test_create_process_group_nccl(self):
         task.wait()
         paddle.device.cuda.synchronize()
         if pg.rank() == 0:
-            assert np.array_equal(tensor_x, sum_result)
+            np.testing.assert_array_equal(tensor_x, sum_result)
         print("test reduce sum api ok\n")

         # test reduce max
@@ -441,7 +445,7 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.reduce(tensor_x, 0, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, max_result)
+            np.testing.assert_array_equal(tensor_x, max_result)
         else:
             task = dist.reduce(tensor_y, 0, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
@@ -461,7 +465,7 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.reduce(tensor_x, 0, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, min_result)
+            np.testing.assert_array_equal(tensor_x, min_result)
         else:
             task = dist.reduce(tensor_y, 0, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
@@ -481,7 +485,7 @@ def test_create_process_group_nccl(self):
         if pg.rank() == 0:
             task = dist.reduce(tensor_x, 0, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_x, prod_result)
+            np.testing.assert_array_equal(tensor_x, prod_result)
         else:
             task = dist.reduce(tensor_y, 0, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
@@ -511,9 +515,9 @@ def test_create_process_group_nccl(self):
         out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
         out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2])
         if pg.rank() == 0:
-            assert np.array_equal(tensor_y, out1)
+            np.testing.assert_array_equal(tensor_y, out1)
         else:
-            assert np.array_equal(tensor_y, out2)
+            np.testing.assert_array_equal(tensor_y, out2)
         print("test scatter api ok\n")

         # test Scatter with shape=[]
@@ -534,9 +538,9 @@ def test_create_process_group_nccl(self):
         out1 = paddle.assign(tensor_x)
         out2 = paddle.assign(tensor_x + 1)
         if pg.rank() == 0:
-            assert np.array_equal(tensor_y, out1)
+            np.testing.assert_array_equal(tensor_y, out1)
         else:
-            assert np.array_equal(tensor_y, out2), f"{tensor_y}, {out2}"
+            np.testing.assert_array_equal(tensor_y, out2)
         assert tensor_y.shape == []
         print("test scatter api with shape=[] ok\n")

@@ -554,7 +558,7 @@ def test_create_process_group_nccl(self):
         else:
             task = dist.recv(tensor_y, 0, sync_op=False)
             task.wait()
-            assert np.array_equal(tensor_y, tensor_x)
+            np.testing.assert_array_equal(tensor_y, tensor_x)

         print("test send api ok")

@@ -570,7 +574,7 @@ def test_create_process_group_nccl(self):
             task = dist.send(tensor_x, 1, sync_op=True)
         else:
             task = dist.recv(tensor_y, 0, sync_op=True)
-            assert np.array_equal(tensor_y, tensor_x)
+            np.testing.assert_array_equal(tensor_y, tensor_x)

         print("test send api ok")
diff --git a/test/collective/strategy_group.py b/test/collective/strategy_group.py
index 5197cf632b402..a70fbdf151822 100644
--- a/test/collective/strategy_group.py
+++ b/test/collective/strategy_group.py
@@ -31,7 +31,7 @@ def _check_using_all_reduce(group):
     data = paddle.to_tensor([1, 2, 3])
     result = paddle.to_tensor([2, 4, 6])
     dist.all_reduce(data, group=group)
-    assert np.array_equal(data, result)
+    np.testing.assert_array_equal(data, result)


 def _check_using_send(group, dst):
@@ -43,7 +43,7 @@ def _check_using_recv(group, src):
     result = paddle.to_tensor([1, 2, 3])
     data = paddle.to_tensor([0, 0, 0])
     dist.recv(data, src=src, group=group)
-    assert np.array_equal(data, result)
+    np.testing.assert_array_equal(data, result)


 class TestStrategyGroupAPI(unittest.TestCase):
diff --git a/test/dygraph_to_static/test_cycle_gan.py b/test/dygraph_to_static/test_cycle_gan.py
index 7974371fc0f13..19c14115afc0f 100644
--- a/test/dygraph_to_static/test_cycle_gan.py
+++ b/test/dygraph_to_static/test_cycle_gan.py
@@ -698,17 +698,13 @@ def test_train(self):
         st_out = self.train(to_static=True)
         dy_out = self.train(to_static=False)

-        assert_func = np.allclose
         # Note(Aurelius84): Because we disable BN on GPU,
         # but here we enhance the check on CPU by `np.array_equal`
         # which means the dy_out and st_out shall be exactly same.
         if not fluid.is_compiled_with_cuda():
-            assert_func = np.array_equal
-
-        self.assertTrue(
-            assert_func(dy_out, st_out),
-            msg=f"dy_out:\n {dy_out}\n st_out:\n{st_out}",
-        )
+            np.testing.assert_array_equal(dy_out, st_out)
+        else:
+            np.testing.assert_allclose(dy_out, st_out, rtol=1e-5, atol=1e-8)


 if __name__ == "__main__":
diff --git a/test/ir/inference/test_inference_predictor_run.py b/test/ir/inference/test_inference_predictor_run.py
index 99ba29956c5da..c6a8c5db9f3c1 100644
--- a/test/ir/inference/test_inference_predictor_run.py
+++ b/test/ir/inference/test_inference_predictor_run.py
@@ -119,7 +119,7 @@ def test_output(self):
         inorder_output = self.get_inorder_output()
         disorder_output = self.get_disorder_output()

-        assert np.allclose(
+        np.testing.assert_allclose(
             inorder_output.numpy().flatten(), disorder_output.numpy().flatten()
         )
diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py
index c8984da8514d2..3657621614189 100644
--- a/test/legacy_test/test_activation_op.py
+++ b/test/legacy_test/test_activation_op.py
@@ -3294,9 +3294,15 @@ def test_api(self):
                 fetch_list=[out_1, out_2, res, out_6],
             )

-            assert np.allclose(res_1, np.power(input, 2))
-            assert np.allclose(res_2, np.power(input, 3))
-            assert np.allclose(res_6, np.power(input, 3))
+            np.testing.assert_allclose(
+                res_1, np.power(input, 2), rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_2, np.power(input, 3), rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_6, np.power(input, 3), rtol=1e-5, atol=1e-8
+            )


 def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
diff --git a/test/legacy_test/test_adaptive_avg_pool2d.py b/test/legacy_test/test_adaptive_avg_pool2d.py
index a2b0066235f87..663ac74781597 100644
--- a/test/legacy_test/test_adaptive_avg_pool2d.py
+++ b/test/legacy_test/test_adaptive_avg_pool2d.py
@@ -148,15 +148,21 @@ def test_static_graph(self):
                 fetch_list=[out_1, out_2, out_3, out_4, out_5],
             )

-            assert np.allclose(res_1, self.res_1_np)
-
-            assert np.allclose(res_2, self.res_2_np)
-
-            assert np.allclose(res_3, self.res_3_np)
-
-            assert np.allclose(res_4, self.res_4_np)
-
-            assert np.allclose(res_5, self.res_5_np)
+            np.testing.assert_allclose(
+                res_1, self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_2, self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_3, self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_4, self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_5, self.res_5_np, rtol=1e-5, atol=1e-8
+            )

     def test_dynamic_graph(self):
         for use_cuda in (
@@ -169,36 +175,38 @@ def test_dynamic_graph(self):
             out_1 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[3, 3]
             )
-
             out_2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=5)
-
             out_3 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[2, 5]
             )
-
             out_4 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[3, 3], data_format="NHWC"
             )
-
             out_5 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[None, 3]
             )
-
             out_6 = paddle.nn.functional.interpolate(
                 x=x, mode="area", size=[2, 5]
             )

-            assert np.allclose(out_1.numpy(), self.res_1_np)
-
-            assert np.allclose(out_2.numpy(), self.res_2_np)
-
-            assert np.allclose(out_3.numpy(), self.res_3_np)
-
-            assert np.allclose(out_4.numpy(), self.res_4_np)
-
-            assert np.allclose(out_5.numpy(), self.res_5_np)
-
-            assert np.allclose(out_6.numpy(), self.res_3_np)
+            np.testing.assert_allclose(
+                out_1.numpy(), self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_2.numpy(), self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_3.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_4.numpy(), self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_5.numpy(), self.res_5_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_6.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )


 class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase):
@@ -260,15 +268,21 @@ def test_static_graph(self):
                 fetch_list=[out_1, out_2, out_3, out_4, out_5],
             )

-            assert np.allclose(res_1, self.res_1_np)
-
-            assert np.allclose(res_2, self.res_2_np)
-
-            assert np.allclose(res_3, self.res_3_np)
-
-            assert np.allclose(res_4, self.res_4_np)
-
-            assert np.allclose(res_5, self.res_5_np)
+            np.testing.assert_allclose(
+                res_1, self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_2, self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_3, self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_4, self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_5, self.res_5_np, rtol=1e-5, atol=1e-8
+            )

     def test_dynamic_graph(self):
         for use_cuda in (
@@ -297,15 +311,21 @@ def test_dynamic_graph(self):
             )
             out_5 = adaptive_avg_pool(x=x)

-            assert np.allclose(out_1.numpy(), self.res_1_np)
-
-            assert np.allclose(out_2.numpy(), self.res_2_np)
-
-            assert np.allclose(out_3.numpy(), self.res_3_np)
-
-            assert np.allclose(out_4.numpy(), self.res_4_np)
-
-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(
+                out_1.numpy(), self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_2.numpy(), self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_3.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_4.numpy(), self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_5.numpy(), self.res_5_np, rtol=1e-5, atol=1e-8
+            )


 class TestOutputSizeTensor(UnittestBase):
diff --git a/test/legacy_test/test_adaptive_avg_pool3d.py b/test/legacy_test/test_adaptive_avg_pool3d.py
index 99afe85996ce6..d5054ba2107af 100755
--- a/test/legacy_test/test_adaptive_avg_pool3d.py
+++ b/test/legacy_test/test_adaptive_avg_pool3d.py
@@ -169,15 +169,21 @@ def test_static_graph(self):
                 fetch_list=[out_1, out_2, out_3, out_4, out_5],
             )

-            assert np.allclose(res_1, self.res_1_np)
-
-            assert np.allclose(res_2, self.res_2_np)
-
-            assert np.allclose(res_3, self.res_3_np)
-
-            assert np.allclose(res_4, self.res_4_np)
-
-            assert np.allclose(res_5, self.res_5_np)
+            np.testing.assert_allclose(
+                res_1, self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_2, self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_3, self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_4, self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_5, self.res_5_np, rtol=1e-5, atol=1e-8
+            )

     def test_dynamic_graph(self):
         for use_cuda in (
@@ -209,17 +215,24 @@ def test_dynamic_graph(self):
                 x=x, mode="area", size=[2, 3, 5]
             )

-            assert np.allclose(out_1.numpy(), self.res_1_np)
-
-            assert np.allclose(out_2.numpy(), self.res_2_np)
-
-            assert np.allclose(out_3.numpy(), self.res_3_np)
-
-            assert np.allclose(out_4.numpy(), self.res_4_np)
-
-            assert np.allclose(out_5.numpy(), self.res_5_np)
-
-            assert np.allclose(out_6.numpy(), self.res_3_np)
+            np.testing.assert_allclose(
+                out_1.numpy(), self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_2.numpy(), self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_3.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_4.numpy(), self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_5.numpy(), self.res_5_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_6.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )


 class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase):
@@ -288,15 +301,21 @@ def test_static_graph(self):
                 fetch_list=[out_1, out_2, out_3, out_4, out_5],
             )

-            assert np.allclose(res_1, self.res_1_np)
-
-            assert np.allclose(res_2, self.res_2_np)
-
-            assert np.allclose(res_3, self.res_3_np)
-
-            assert np.allclose(res_4, self.res_4_np)
-
-            assert np.allclose(res_5, self.res_5_np)
+            np.testing.assert_allclose(
+                res_1, self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_2, self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_3, self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_4, self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                res_5, self.res_5_np, rtol=1e-5, atol=1e-8
+            )

     def test_dynamic_graph(self):
         for use_cuda in (
@@ -329,15 +348,21 @@ def test_dynamic_graph(self):
             )
             out_5 = adaptive_avg_pool(x=x)

-            assert np.allclose(out_1.numpy(), self.res_1_np)
-
-            assert np.allclose(out_2.numpy(), self.res_2_np)
-
-            assert np.allclose(out_3.numpy(), self.res_3_np)
-
-            assert np.allclose(out_4.numpy(), self.res_4_np)
-
-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(
+                out_1.numpy(), self.res_1_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_2.numpy(), self.res_2_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_3.numpy(), self.res_3_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_4.numpy(), self.res_4_np, rtol=1e-5, atol=1e-8
+            )
+            np.testing.assert_allclose(
+                out_5.numpy(), self.res_5_np, rtol=1e-5, atol=1e-8
+            )


 if __name__ == '__main__':
diff --git a/test/legacy_test/test_adaptive_max_pool2d.py b/test/legacy_test/test_adaptive_max_pool2d.py
index 62a4dee7e58a0..104271b955257 100644
--- a/test/legacy_test/test_adaptive_max_pool2d.py
+++ b/test/legacy_test/test_adaptive_max_pool2d.py
@@ -149,15 +149,15 @@ def test_static_graph(self):
                 fetch_list=[out_1, out_2, out_3, out_5],
             )

-            assert np.allclose(res_1, self.res_1_np)
+            np.testing.assert_allclose(res_1, self.res_1_np)

-            assert np.allclose(res_2, self.res_2_np)
+            np.testing.assert_allclose(res_2, self.res_2_np)

-            assert np.allclose(res_3, self.res_3_np)
+            np.testing.assert_allclose(res_3, self.res_3_np)

-            # assert np.allclose(res_4, self.res_4_np)
+            # np.testing.assert_allclose(res_4, self.res_4_np)

-            assert np.allclose(res_5, self.res_5_np)
+            np.testing.assert_allclose(res_5, self.res_5_np)

     def test_dynamic_graph(self):
         for use_cuda in (
@@ -184,15 +184,15 @@ def test_dynamic_graph(self):
                 x=x, output_size=[None, 3]
             )

-            assert np.allclose(out_1.numpy(), self.res_1_np)
+            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)

-            assert np.allclose(out_2.numpy(), self.res_2_np)
+            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)

-            assert np.allclose(out_3.numpy(), self.res_3_np)
+            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)

-            # assert np.allclose(out_4.numpy(), self.res_4_np)
+            # np.testing.assert_allclose(out_4.numpy(), self.res_4_np)

-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)


 class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase):
@@ -255,15 +255,15 @@ def test_static_graph(self):
                 fetch_list=[out_1, out_2, out_3, out_5],
             )

-            assert np.allclose(res_1, self.res_1_np)
+            np.testing.assert_allclose(res_1, self.res_1_np)

-            assert np.allclose(res_2, self.res_2_np)
+            np.testing.assert_allclose(res_2, self.res_2_np)

-            assert np.allclose(res_3, self.res_3_np)
+            np.testing.assert_allclose(res_3, self.res_3_np)

-            # assert np.allclose(res_4, self.res_4_np)
+            # np.testing.assert_allclose(res_4, self.res_4_np)

-            assert np.allclose(res_5, self.res_5_np)
+            np.testing.assert_allclose(res_5, self.res_5_np)

     def test_dynamic_graph(self):
         for use_cuda in (
@@ -291,15 +291,15 @@ def test_dynamic_graph(self):
             )
             out_5 = adaptive_max_pool(x=x)

-            assert np.allclose(out_1.numpy(), self.res_1_np)
+            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)

-            assert np.allclose(out_2.numpy(), self.res_2_np)
+            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)

-            assert np.allclose(out_3.numpy(), self.res_3_np)
+            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)

-            # assert np.allclose(out_4.numpy(), self.res_4_np)
+            # np.testing.assert_allclose(out_4.numpy(), self.res_4_np)

-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)


 class TestOutDtype(unittest.TestCase):
diff --git a/test/legacy_test/test_adaptive_max_pool3d.py b/test/legacy_test/test_adaptive_max_pool3d.py
index f221964097001..13eed4823d88f 100755
--- a/test/legacy_test/test_adaptive_max_pool3d.py
+++ b/test/legacy_test/test_adaptive_max_pool3d.py
@@ -170,15 +170,15 @@ def test_static_graph(self):
                 fetch_list=[out_1, out_2, out_3, out_5],
             )

-            assert np.allclose(res_1, self.res_1_np)
+            np.testing.assert_allclose(res_1, self.res_1_np)

-            assert np.allclose(res_2, self.res_2_np)
+            np.testing.assert_allclose(res_2, self.res_2_np)

-            assert np.allclose(res_3, self.res_3_np)
+            np.testing.assert_allclose(res_3, self.res_3_np)

-            # assert np.allclose(res_4, self.res_4_np)
+            # np.testing.assert_allclose(res_4, self.res_4_np)

-            assert np.allclose(res_5, self.res_5_np)
+            np.testing.assert_allclose(res_5, self.res_5_np)

     def test_dynamic_graph(self):
         for use_cuda in (
@@ -205,15 +205,15 @@ def test_dynamic_graph(self):
                 x=x, output_size=[None, 3, None]
             )

-            assert np.allclose(out_1.numpy(), self.res_1_np)
+            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)

-            assert np.allclose(out_2.numpy(), self.res_2_np)
+            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)

-            assert np.allclose(out_3.numpy(), self.res_3_np)
+            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)

-            # assert np.allclose(out_4.numpy(), self.res_4_np)
+            # np.testing.assert_allclose(out_4.numpy(), self.res_4_np)

-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)


 class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase):
@@ -280,15 +280,15 @@ def test_static_graph(self):
                 fetch_list=[out_1, out_2, out_3, out_5],
             )

-            assert np.allclose(res_1, self.res_1_np)
+            np.testing.assert_allclose(res_1, self.res_1_np)

-            assert np.allclose(res_2, self.res_2_np)
+            np.testing.assert_allclose(res_2, self.res_2_np)

-            assert np.allclose(res_3, self.res_3_np)
+            np.testing.assert_allclose(res_3, self.res_3_np)

             # assert np.allclose(res_4, self.res_4_np)

-            assert np.allclose(res_5, self.res_5_np)
+            np.testing.assert_allclose(res_5, self.res_5_np)

     def test_dynamic_graph(self):
         for use_cuda in (
@@ -320,15 +320,15 @@ def test_dynamic_graph(self):
             )
             out_5 = adaptive_max_pool(x=x)

-            assert np.allclose(out_1.numpy(), self.res_1_np)
+            np.testing.assert_allclose(out_1.numpy(), self.res_1_np)

-            assert np.allclose(out_2.numpy(), self.res_2_np)
+            np.testing.assert_allclose(out_2.numpy(), self.res_2_np)

-            assert np.allclose(out_3.numpy(), self.res_3_np)
+            np.testing.assert_allclose(out_3.numpy(), self.res_3_np)

             # assert np.allclose(out_4.numpy(), self.res_4_np)

-            assert np.allclose(out_5.numpy(), self.res_5_np)
+            np.testing.assert_allclose(out_5.numpy(), self.res_5_np)


 class TestOutDtype(unittest.TestCase):
diff --git a/test/legacy_test/test_addmm_op.py b/test/legacy_test/test_addmm_op.py
index 66a86961e885d..1f92270cbeeac 100644
--- a/test/legacy_test/test_addmm_op.py
+++ b/test/legacy_test/test_addmm_op.py
@@ -328,7 +328,9 @@ def test_api_with_dygraph(self):
             x = fluid.dygraph.to_variable(np_x)
             y = fluid.dygraph.to_variable(np_y)
             out = paddle.tensor.addmm(input, x, y)
-            assert np.allclose(np_input + np.dot(np_x, np_y), out.numpy())
+            np.testing.assert_allclose(
+                np_input + np.dot(np_x, np_y), out.numpy(), rtol=1e-5, atol=1e-8
+            )


 class TestAddMMAPI(unittest.TestCase):
diff --git a/test/legacy_test/test_batch_norm_op_v2.py b/test/legacy_test/test_batch_norm_op_v2.py
index a55c478996678..618513a0d044b 100644
--- a/test/legacy_test/test_batch_norm_op_v2.py
+++ b/test/legacy_test/test_batch_norm_op_v2.py
@@ -173,7 +173,7 @@ def compute_v2(x):
             bn = paddle.nn.BatchNorm2D(shape[1])
             eag_y = bn(paddle.to_tensor(x))
-            assert np.allclose(eag_y.numpy(), y.numpy())
+            np.testing.assert_allclose(eag_y.numpy(), y.numpy())
             return y.numpy()

         def compute_v3(x, is_test, trainable_statistics):
@@ -351,10 +351,10 @@ def test_1d_opt(self):
             y.backward()
             y2.backward()

-            assert np.allclose(
+            np.testing.assert_allclose(
                 y.numpy().flatten(), y2.numpy().flatten(), atol=1e-5, rtol=1e-5
             )
-            assert np.allclose(
+            np.testing.assert_allclose(
                 bn1d.weight.grad.numpy().flatten(),
                 bn2d.weight.grad.numpy().flatten(),
                 atol=1e-5,
diff --git a/test/legacy_test/test_broadcast_to_op.py b/test/legacy_test/test_broadcast_to_op.py
index 63d3b91417512..e2da6a1117296 100644
--- a/test/legacy_test/test_broadcast_to_op.py
+++ b/test/legacy_test/test_broadcast_to_op.py
@@ -66,9 +66,9 @@ def test_api(self):
             },
             fetch_list=[out_1, out_2, out_3],
         )
-        assert np.array_equal(res_1, np.tile(input, (1, 1)))
-        assert np.array_equal(res_2, np.tile(input, (1, 1)))
-        assert np.array_equal(res_3, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))

     def test_api_fp16_gpu(self):
         if paddle.fluid.core.is_compiled_with_cuda():
@@ -101,9 +101,9 @@ def test_api_fp16_gpu(self):
                 },
                 fetch_list=[out_1, out_2, out_3],
             )
-            assert np.array_equal(res_1, np.tile(input, (1, 1)))
-            assert np.array_equal(res_2, np.tile(input, (1, 1)))
-            assert np.array_equal(res_3, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))


 if __name__ == "__main__":
diff --git a/test/legacy_test/test_channel_shuffle.py b/test/legacy_test/test_channel_shuffle.py
index efecb886a757f..f8b6ef1df9514 100644
--- a/test/legacy_test/test_channel_shuffle.py
+++ b/test/legacy_test/test_channel_shuffle.py
@@ -120,8 +120,8 @@ def test_static_graph_functional(self):
                 use_prune=True,
             )

-            assert np.allclose(res_1, self.out_1_np)
-            assert np.allclose(res_2, self.out_2_np)
+            np.testing.assert_allclose(res_1[0], self.out_1_np)
+            np.testing.assert_allclose(res_2[0], self.out_2_np)

     # same test between layer and functional in this op.
     def test_static_graph_layer(self):
@@ -160,8 +160,8 @@ def test_static_graph_layer(self):
                 use_prune=True,
             )

-            assert np.allclose(res_1, out_1_np)
-            assert np.allclose(res_2, out_2_np)
+            np.testing.assert_allclose(res_1[0], out_1_np)
+            np.testing.assert_allclose(res_2[0], out_2_np)

     def run_dygraph(self, groups, data_format):
         n, c, h, w = 2, 9, 4, 4
diff --git a/test/legacy_test/test_concat_op.py b/test/legacy_test/test_concat_op.py
index 1176ba32b20d4..db848d7b5cff4 100644
--- a/test/legacy_test/test_concat_op.py
+++ b/test/legacy_test/test_concat_op.py
@@ -472,9 +472,15 @@ def test_fluid_api(self):
            feed={"x_1": input_2, "x_2": input_2, "x_3": input_3},
            fetch_list=[out_1, out_2, out_3],
        )
-        assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
+        np.testing.assert_array_equal(
+            res_1, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_2, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_3, np.concatenate((input_2, input_3), axis=1)
+        )

     def test_api(self):
         paddle.enable_static()
@@ -501,10 +507,18 @@ def test_api(self):
            feed={"x_1": input_2, "x_2": input_2, "x_3": input_3},
            fetch_list=[out_1, out_2, out_3, out_4],
        )
-        assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
-        assert np.array_equal(res_4, np.concatenate((input_2, input_3), axis=1))
+        np.testing.assert_array_equal(
+            res_1, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_2, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_3, np.concatenate((input_2, input_3), axis=1)
+        )
+        np.testing.assert_array_equal(
+            res_4, np.concatenate((input_2, input_3), axis=1)
+        )

     def test_imperative(self):
         in1 = np.array([[1, 2, 3], [4, 5, 6]])
diff --git a/test/legacy_test/test_cuda_graph_partial_graph_static_run.py b/test/legacy_test/test_cuda_graph_partial_graph_static_run.py
index 822bef54b5a63..2e301bdbd94da 100644
--- a/test/legacy_test/test_cuda_graph_partial_graph_static_run.py
+++ b/test/legacy_test/test_cuda_graph_partial_graph_static_run.py
@@ -124,7 +124,7 @@ def test_static_mode_cuda_graph(self):
         x_data = np.random.random((3, 10)).astype('float32')
         cuda_graph_rst = self.run_with_cuda_graph(x_data)
         normal_run_rst = self.normal_run(x_data)
-        assert np.array_equal(cuda_graph_rst, normal_run_rst)
+        np.testing.assert_array_equal(cuda_graph_rst, normal_run_rst)


 if __name__ == "__main__":
diff --git a/test/legacy_test/test_detach.py b/test/legacy_test/test_detach.py
index a17178ac28716..4b3d25a1cde73 100644
--- a/test/legacy_test/test_detach.py
+++ b/test/legacy_test/test_detach.py
@@ -172,7 +172,9 @@ def test_NoDetachMulti_DetachMulti(self):
     def test_NoDetachSingle_DetachMulti(self):
         array_no_detach_single = self.no_detach_single()
         array_detach_multi = self.detach_multi()
-        assert np.array_equal(array_no_detach_single, array_detach_multi)
+        np.testing.assert_array_equal(
+            array_no_detach_single, array_detach_multi
+        )


 class TestInplace(unittest.TestCase):
diff --git a/test/legacy_test/test_egr_python_api.py b/test/legacy_test/test_egr_python_api.py
index 8e7d07f8d2f41..10f86571af101 100644
--- a/test/legacy_test/test_egr_python_api.py
+++ b/test/legacy_test/test_egr_python_api.py
@@ -844,6 +844,7 @@ def test_set_value(self):
             ori_place = egr_tensor.place
             new_arr = np.random.rand(4, 16, 16, 32).astype('float32')

+            self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))
             egr_tensor.set_value(new_arr)

@@ -964,6 +965,7 @@ def test_set_value(self):
             linear = paddle.nn.Linear(1, 3)
             ori_place = linear.weight.place
             new_weight = np.ones([1, 3]).astype('float32')

+            self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))
             linear.weight.set_value(new_weight)

diff --git a/test/legacy_test/test_einsum_v2.py b/test/legacy_test/test_einsum_v2.py
index 434c59b5b804e..6b4bf6958b946 100644
--- a/test/legacy_test/test_einsum_v2.py
+++ b/test/legacy_test/test_einsum_v2.py
@@ -581,7 +581,7 @@ def test_shape(self):
         A = paddle.to_tensor(np.array([1.0, 2.0]))
         A_expect = paddle.to_tensor([[1.0, 0.0], [0.0, 2.0]])
         A_actual = paddle.einsum('i->ii', A)
-        assert np.array_equal(A_expect.numpy(), A_actual.numpy())
+        np.testing.assert_array_equal(A_expect.numpy(), A_actual.numpy())


 class TestSimpleUndiagonal2(unittest.TestCase):
@@ -595,7 +595,7 @@ def test_shape(self):
         B = paddle.to_tensor(np.array([1.0, 1.0]))
         A_expect = paddle.to_tensor([[2.0, 0.0], [0.0, 4.0]])
         A_actual = paddle.einsum('i,j->ii', A, B)
-        assert np.array_equal(A_expect.numpy(), A_actual.numpy())
+        np.testing.assert_array_equal(A_expect.numpy(), A_actual.numpy())


 class TestSimpleComplexGrad(unittest.TestCase):
diff --git a/test/legacy_test/test_expand_as_v2_op.py b/test/legacy_test/test_expand_as_v2_op.py
index db866144eaf96..68c9801acb4d8 100755
--- a/test/legacy_test/test_expand_as_v2_op.py
+++ b/test/legacy_test/test_expand_as_v2_op.py
@@ -280,7 +280,7 @@ def test_api(self):
             feed={"x": input1, "target_tensor": input2},
             fetch_list=[out_1],
         )
-        assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1)))
+        np.testing.assert_array_equal(res_1[0], np.tile(input1, (2, 1, 1)))


 if __name__ == "__main__":
diff --git a/test/legacy_test/test_expand_v2_op.py b/test/legacy_test/test_expand_v2_op.py
index 128bdda6da019..9b9f6b631a2bf 100644
--- a/test/legacy_test/test_expand_v2_op.py
+++ b/test/legacy_test/test_expand_v2_op.py
@@ -318,9 +318,9 @@ def test_api(self):
             },
             fetch_list=[out_1, out_2, out_3],
         )
-        assert np.array_equal(res_1, np.tile(input, (1, 1)))
-        assert np.array_equal(res_2, np.tile(input, (1, 1)))
-        assert np.array_equal(res_3, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
+        np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))


 class TestExpandInferShape(unittest.TestCase):
diff --git a/test/legacy_test/test_fill_constant_op.py b/test/legacy_test/test_fill_constant_op.py
index e74b24c6e5c19..614cd29668d88 100644
--- a/test/legacy_test/test_fill_constant_op.py
+++ b/test/legacy_test/test_fill_constant_op.py
@@ -338,14 +338,30 @@ def test_api(self):
             fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8],
         )

-        assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_3, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_4, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_6, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_7, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_8, np.full([1, 2], 1.1, dtype="float32"))
+        np.testing.assert_array_equal(
+            res_1, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_2, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_3, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_4, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_5, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_6, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_7, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_8, np.full([1, 2], 1.1, dtype="float32")
+        )


 class TestFillConstantImperative(unittest.TestCase):
@@ -369,16 +385,16 @@ def test_api(self):
             res4 = paddle.tensor.fill_constant(
                 shape=shape, dtype='int32', value=value
             )
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 res1.numpy(), np.full([1, 2], 1.1, dtype="float32")
             )
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 res2.numpy(), np.full([1, 2], 1.1, dtype="float32")
             )
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 res3.numpy(), np.full([1, 2], 1.1, dtype="float32")
             )
-            assert np.array_equal(
+            np.testing.assert_array_equal(
                 res4.numpy(), np.full([1, 2], 88, dtype="int32")
             )
diff --git a/test/legacy_test/test_flatten_op.py b/test/legacy_test/test_flatten_op.py
index 76f8ef4a1a462..0803db60c73c0 100644
--- a/test/legacy_test/test_flatten_op.py
+++ b/test/legacy_test/test_flatten_op.py
@@ -90,7 +90,7 @@ def test_fp16_with_gpu(self):
                 fetch_list=[y],
             )

-            assert np.array_equal(res[0].shape, [12 * 14])
+            np.testing.assert_array_equal(res[0].shape, [12 * 14])


 if __name__ == "__main__":
diff --git a/test/legacy_test/test_full_op.py b/test/legacy_test/test_full_op.py
index 473289447410a..9a5c95044927f 100644
--- a/test/legacy_test/test_full_op.py
+++ b/test/legacy_test/test_full_op.py
@@ -74,13 +74,27 @@ def test_api(self):
             fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
         )

-        assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_3, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_4, np.full([1, 2], 1.2, dtype="float32"))
-        assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_6, np.full([1, 2], 1.1, dtype="float32"))
-        assert np.array_equal(res_7, np.full([1, 2], 1.1, dtype="float32"))
+        np.testing.assert_array_equal(
+            res_1, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_2, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_3, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_4, np.full([1, 2], 1.2, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_5, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_6, np.full([1, 2], 1.1, dtype="float32")
+        )
+        np.testing.assert_array_equal(
+            res_7, np.full([1, 2], 1.1, dtype="float32")
+        )

     def test_api_eager(self):
         with fluid.dygraph.base.guard():
@@ -134,18 +148,36 @@ def test_api_eager(self):
                 out_7, dtype=np.float32, fill_value=np.abs(1.1)
             )

-            assert np.array_equal(out_1, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_2, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_3, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_4, np.full([1, 2], 1.2, dtype="float32"))
-            assert np.array_equal(out_5, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_6, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_7, np.full([1, 2], 1.1, dtype="float32"))
-            assert np.array_equal(out_8, np.full([2], 1.1, dtype="float32"))
-            assert np.array_equal(
+            np.testing.assert_array_equal(
+                out_1, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_2, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_3, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_4, np.full([1, 2], 1.2, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_5, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_6, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_7, np.full([1, 2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
+                out_8, np.full([2], 1.1, dtype="float32")
+            )
+            np.testing.assert_array_equal(
                 out_9, np.full([2, 2, 4], 1.1, dtype="float32")
             )
-            assert np.array_equal(out_10, np.full([1, 2], 1.1, dtype="float32"))
+            np.testing.assert_array_equal(
+                out_10, np.full([1, 2], 1.1, dtype="float32")
+            )


 class TestFullOpError(unittest.TestCase):
diff --git a/test/legacy_test/test_fused_attention_pass.py b/test/legacy_test/test_fused_attention_pass.py
index 0f64bf3458a12..263ff746c710f 100644
--- a/test/legacy_test/test_fused_attention_pass.py
+++ b/test/legacy_test/test_fused_attention_pass.py
@@ -185,7 +185,9 @@ def get_rst(self, use_pass=False):
     def test_pass(self):
         fused_rst = self.get_rst(use_pass=True)
         non_fused_rst = self.get_rst()
-        assert np.allclose(fused_rst, non_fused_rst)
+        np.testing.assert_allclose(
+            fused_rst, non_fused_rst, rtol=1e-5, atol=1e-8
+        )


 if __name__ == "__main__":
diff --git a/test/legacy_test/test_fused_feedforward_pass.py b/test/legacy_test/test_fused_feedforward_pass.py
index e72de143b37bf..1abbfec2201bd 100644
--- a/test/legacy_test/test_fused_feedforward_pass.py
+++ b/test/legacy_test/test_fused_feedforward_pass.py
@@ -165,7 +165,9 @@ def test_pass(self):
                 self.use_dropout_2 = use_dropout_2
                 ret_loss = self.get_value()
                 ret_loss_fused = self.get_value(use_pass=True)
-                assert np.allclose(ret_loss, ret_loss_fused)
+                np.testing.assert_allclose(
+                    ret_loss, ret_loss_fused, rtol=1e-5, atol=1e-8
+                )


 if __name__ == "__main__":
diff --git a/test/legacy_test/test_imperative_layer_children.py b/test/legacy_test/test_imperative_layer_children.py
index b0fb822f48c0e..bf440d511c566 100644
--- a/test/legacy_test/test_imperative_layer_children.py
+++ b/test/legacy_test/test_imperative_layer_children.py
@@ -60,8 +60,8 @@ def test_func_apply_init_weight(self):
         self.ori_y1, self.ori_y2 = self.func_apply_init_weight()

         # compare ori dygraph and new egr
-        assert np.array_equal(self.ori_y1.numpy(), self.new_y1.numpy())
-        assert np.array_equal(self.ori_y2.numpy(), self.new_y2.numpy())
+        np.testing.assert_array_equal(self.ori_y1.numpy(), self.new_y1.numpy())
+        np.testing.assert_array_equal(self.ori_y2.numpy(), self.new_y2.numpy())


 if __name__ == '__main__':
diff --git a/test/legacy_test/test_imperative_numpy_bridge.py b/test/legacy_test/test_imperative_numpy_bridge.py
index 58059a295539d..0adb69adcf878 100644
--- a/test/legacy_test/test_imperative_numpy_bridge.py
+++ b/test/legacy_test/test_imperative_numpy_bridge.py
@@ -44,6 +44,7 @@ def test_tensor_from_numpy(self):
             data_np[0][0] = -1
             self.assertEqual(data_np[0][0], -1)
             self.assertNotEqual(var2[0][0].numpy(), -1)
+            self.assertFalse(np.array_equal(var2.numpy(), data_np))

diff --git a/test/legacy_test/test_layers.py b/test/legacy_test/test_layers.py
index ded9e08da74cf..8cea6edd1c501 100644
--- a/test/legacy_test/test_layers.py
+++ b/test/legacy_test/test_layers.py
@@ -637,8 +637,8 @@ def test_embeding(self):
             dy_rlt = emb2(base.to_variable(inp_word))
             dy_rlt_value = dy_rlt.numpy()

-        self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
+        np.testing.assert_allclose(static_rlt2[0], static_rlt)
+        np.testing.assert_allclose(dy_rlt_value[0], static_rlt)

         with self.dynamic_graph():
             custom_weight = np.random.randn(dict_size, 32).astype("float32")
diff --git a/test/legacy_test/test_linspace.py b/test/legacy_test/test_linspace.py
index f36a5e7c8cb1b..6468ad08c8fb5 100644
--- a/test/legacy_test/test_linspace.py
+++ b/test/legacy_test/test_linspace.py
@@ -169,7 +169,7 @@ def test_dtype(self):
             res_1, res_2, res_3 = exe.run(
                 fluid.default_main_program(), fetch_list=[out_1, out_2, out_3]
             )
-            assert np.array_equal(res_1, res_2)
+            np.testing.assert_array_equal(res_1, res_2)

     def test_name(self):
         with paddle_static_guard():
diff --git a/test/legacy_test/test_logspace.py b/test/legacy_test/test_logspace.py
index e68dba46fefc6..0587846bc4841 100644
--- a/test/legacy_test/test_logspace.py
+++ b/test/legacy_test/test_logspace.py
@@ -179,7 +179,7 @@ def test_dtype(self):
         exe = paddle.static.Executor()
         res_1, res_2 = exe.run(prog, fetch_list=[out_1, out_2])

-        assert np.array_equal(res_1, res_2)
+        np.testing.assert_array_equal(res_1, res_2)
         paddle.disable_static()

     def test_name(self):
diff --git a/test/legacy_test/test_lrn_op.py b/test/legacy_test/test_lrn_op.py
index ff087fa44823e..df9b1ebccf481 100644
--- a/test/legacy_test/test_lrn_op.py
+++ b/test/legacy_test/test_lrn_op.py
@@ -371,7 +371,7 @@ def
test_static_fp16_gpu(self): fetch_list=[y], ) - assert np.array_equal(res[0].shape, input.shape) + np.testing.assert_array_equal(res[0].shape, input.shape) if __name__ == "__main__": diff --git a/test/legacy_test/test_meshgrid_op.py b/test/legacy_test/test_meshgrid_op.py index 377699e3855ec..d2f7b0c2eca89 100644 --- a/test/legacy_test/test_meshgrid_op.py +++ b/test/legacy_test/test_meshgrid_op.py @@ -162,8 +162,8 @@ def test_api(self): feed={'x': input_1, 'y': input_2}, fetch_list=[grid_x, grid_y], ) - assert np.array_equal(res_1, out_1) - assert np.array_equal(res_2, out_2) + np.testing.assert_array_equal(res_1, out_1) + np.testing.assert_array_equal(res_2, out_2) class TestMeshgridOp4(unittest.TestCase): @@ -199,8 +199,8 @@ def test_list_input(self): fetch_list=[grid_x, grid_y], ) - assert np.array_equal(res_1, out_1) - assert np.array_equal(res_2, out_2) + np.testing.assert_array_equal(res_1, out_1) + np.testing.assert_array_equal(res_2, out_2) class TestMeshgridOp5(unittest.TestCase): @@ -236,8 +236,8 @@ def test_tuple_input(self): fetch_list=[grid_x, grid_y], ) - assert np.array_equal(res_1, out_1) - assert np.array_equal(res_2, out_2) + np.testing.assert_array_equal(res_1, out_1) + np.testing.assert_array_equal(res_2, out_2) class TestMeshgridOp6(unittest.TestCase): @@ -262,8 +262,8 @@ def test_api_with_dygraph(self): tensor_4 = fluid.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid(tensor_3, tensor_4) - assert np.array_equal(res_3.shape, [100, 200]) - assert np.array_equal(res_4.shape, [100, 200]) + np.testing.assert_array_equal(res_3.shape, [100, 200]) + np.testing.assert_array_equal(res_4.shape, [100, 200]) class TestMeshgridOp7(unittest.TestCase): @@ -288,8 +288,8 @@ def test_api_with_dygraph_list_input(self): tensor_4 = fluid.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid([tensor_3, tensor_4]) - assert np.array_equal(res_3.shape, [100, 200]) - assert np.array_equal(res_4.shape, [100, 200]) + np.testing.assert_array_equal(res_3.shape, [100, 200]) + np.testing.assert_array_equal(res_4.shape, [100, 200]) class TestMeshgridOp8(unittest.TestCase): @@ -314,8 +314,8 @@ def test_api_with_dygraph_tuple_input(self): tensor_4 = fluid.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid((tensor_3, tensor_4)) - assert np.array_equal(res_3.shape, [100, 200]) - assert np.array_equal(res_4.shape, [100, 200]) + np.testing.assert_array_equal(res_3.shape, [100, 200]) + np.testing.assert_array_equal(res_4.shape, [100, 200]) class TestMeshGrid_ZeroDim(TestMeshgridOp): diff --git a/test/legacy_test/test_multiprocess_dataloader_dataset.py b/test/legacy_test/test_multiprocess_dataloader_dataset.py index 9e2b89b12860c..7eb99ef7006e7 100755 --- a/test/legacy_test/test_multiprocess_dataloader_dataset.py +++ b/test/legacy_test/test_multiprocess_dataloader_dataset.py @@ -84,8 +84,8 @@ def run_main(self, num_workers, places): assert label.shape == [1, 1] assert isinstance(input, fluid.core.eager.Tensor) assert isinstance(label, fluid.core.eager.Tensor) - assert np.allclose(input.numpy(), input_np[i]) - assert np.allclose(label.numpy(), label_np[i]) + np.testing.assert_allclose(input.numpy(), input_np[i]) + np.testing.assert_allclose(label.numpy(), label_np[i]) def test_main(self): places = [paddle.CPUPlace()] @@ -109,10 +109,10 @@ def test_main(self): input1, label1, input2, label2 = dataset[i] input1_t, label1_t = dataset1[i] input2_t, label2_t = dataset2[i] - assert np.allclose(input1, input1_t) - assert np.allclose(label1, label1_t) - assert 
np.allclose(input2, input2_t) - assert np.allclose(label2, label2_t) + np.testing.assert_allclose(input1, input1_t) + np.testing.assert_allclose(label1, label1_t) + np.testing.assert_allclose(input2, input2_t) + np.testing.assert_allclose(label2, label2_t) class TestRandomSplitApi(unittest.TestCase): @@ -226,12 +226,12 @@ def run_main(self, num_workers, places): idx = 0 for image, label in iter(dataset1): - assert np.allclose(image, samples[idx][0]) - assert np.allclose(label, samples[idx][1]) + np.testing.assert_allclose(image, samples[idx][0]) + np.testing.assert_allclose(label, samples[idx][1]) idx += 1 for image, label in iter(dataset2): - assert np.allclose(image, samples[idx][0]) - assert np.allclose(label, samples[idx][1]) + np.testing.assert_allclose(image, samples[idx][0]) + np.testing.assert_allclose(label, samples[idx][1]) idx += 1 def test_main(self): diff --git a/test/legacy_test/test_nan_to_num_op.py b/test/legacy_test/test_nan_to_num_op.py index 2aad0ff5bdac3..a5e0bbe62f4e2 100644 --- a/test/legacy_test/test_nan_to_num_op.py +++ b/test/legacy_test/test_nan_to_num_op.py @@ -79,10 +79,10 @@ def test_static(self): exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': x_np}, fetch_list=[out1, out2, out3, out4]) - self.assertTrue(np.allclose(out1_np, res[0])) - self.assertTrue(np.allclose(out2_np, res[1])) - self.assertTrue(np.allclose(out3_np, res[2])) - self.assertTrue(np.allclose(out4_np, res[3])) + np.testing.assert_allclose(out1_np, res[0]) + np.testing.assert_allclose(out2_np, res[1]) + np.testing.assert_allclose(out3_np, res[2]) + np.testing.assert_allclose(out4_np, res[3]) def test_dygraph(self): paddle.disable_static(place=self.place) @@ -97,23 +97,23 @@ def test_dygraph(self): out_tensor = paddle.nan_to_num(x_tensor) out_np = np_nan_to_num(x_np) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) out_tensor = paddle.nan_to_num(x_tensor, 1.0, None, None) out_np = np_nan_to_num(x_np, 1, None, None) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) out_tensor = paddle.nan_to_num(x_tensor, 1.0, 2.0, None) out_np = np_nan_to_num(x_np, 1, 2, None) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) out_tensor = paddle.nan_to_num(x_tensor, 1.0, None, -10.0) out_np = np_nan_to_num(x_np, 1, None, -10) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) out_tensor = paddle.nan_to_num(x_tensor, 1.0, 100.0, -10.0) out_np = np_nan_to_num(x_np, 1, 100, -10) - self.assertTrue(np.allclose(out_tensor.numpy(), out_np)) + np.testing.assert_allclose(out_tensor.numpy(), out_np) paddle.enable_static() @@ -128,7 +128,7 @@ def test_check_grad(self): dx = paddle.grad(y, x_tensor)[0].numpy() np_grad = np_nan_to_num_grad(x_np, np.ones_like(x_np)) - self.assertTrue(np.allclose(np_grad, dx)) + np.testing.assert_allclose(np_grad, dx) paddle.enable_static() diff --git a/test/legacy_test/test_number_count_op.py b/test/legacy_test/test_number_count_op.py index d2a1dcbf52824..07185a8dfeefc 100644 --- a/test/legacy_test/test_number_count_op.py +++ b/test/legacy_test/test_number_count_op.py @@ -71,7 +71,7 @@ def test_api_dygraph(self): paddle.disable_static() x = paddle.to_tensor(self.x) out = utils._number_count(x, self.upper_num) - assert np.allclose(out.numpy(), self.out) + np.testing.assert_allclose(out.numpy(), self.out) if __name__ == 
'__main__': diff --git a/test/legacy_test/test_numel_op.py b/test/legacy_test/test_numel_op.py index 9d87d242a87f8..b4b18ccbe07f7 100644 --- a/test/legacy_test/test_numel_op.py +++ b/test/legacy_test/test_numel_op.py @@ -120,10 +120,10 @@ def test_numel_static(self): }, fetch_list=[out_1, out_2], ) - assert np.array_equal( + np.testing.assert_array_equal( res_1, np.array(np.size(input_1)).astype("int64") ) - assert np.array_equal( + np.testing.assert_array_equal( res_2, np.array(np.size(input_2)).astype("int64") ) @@ -135,8 +135,8 @@ def test_numel_imperative(self): x_2 = paddle.to_tensor(input_2) out_1 = paddle.numel(x_1) out_2 = paddle.numel(x_2) - assert np.array_equal(out_1.numpy().item(0), np.size(input_1)) - assert np.array_equal(out_2.numpy().item(0), np.size(input_2)) + np.testing.assert_array_equal(out_1.numpy().item(0), np.size(input_1)) + np.testing.assert_array_equal(out_2.numpy().item(0), np.size(input_2)) paddle.enable_static() def test_error(self): diff --git a/test/legacy_test/test_pixel_shuffle_op.py b/test/legacy_test/test_pixel_shuffle_op.py index c20ba8678d806..7c6f18479fd11 100644 --- a/test/legacy_test/test_pixel_shuffle_op.py +++ b/test/legacy_test/test_pixel_shuffle_op.py @@ -186,17 +186,17 @@ def test_static_graph_functional(self): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) + )[0] - assert np.allclose(res_1, self.out_1_np) - assert np.allclose(res_2, self.out_2_np) + np.testing.assert_allclose(res_1, self.out_1_np) + np.testing.assert_allclose(res_2, self.out_2_np) def test_api_fp16(self): paddle.enable_static() @@ -226,15 +226,15 @@ def test_api_fp16(self): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) - assert np.allclose(res_1, out_1_np) - assert np.allclose(res_2, out_2_np) + )[0] + np.testing.assert_allclose(res_1, out_1_np) + np.testing.assert_allclose(res_2, out_2_np) # same test between layer and functional in this op. def test_static_graph_layer(self): @@ -264,17 +264,17 @@ def test_static_graph_layer(self): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) + )[0] - assert np.allclose(res_1, out_1_np) - assert np.allclose(res_2, out_2_np) + np.testing.assert_allclose(res_1, out_1_np, rtol=1e-5, atol=1e-8) + np.testing.assert_allclose(res_2, out_2_np, rtol=1e-5, atol=1e-8) def run_dygraph(self, up_factor, data_format): n, c, h, w = 2, 9, 4, 4 diff --git a/test/legacy_test/test_pixel_unshuffle.py b/test/legacy_test/test_pixel_unshuffle.py index b2cfd457603c4..2353ca0192c7e 100644 --- a/test/legacy_test/test_pixel_unshuffle.py +++ b/test/legacy_test/test_pixel_unshuffle.py @@ -225,17 +225,17 @@ def test_static_graph_functional(self): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) + )[0] - assert np.allclose(res_1, self.out_1_np) - assert np.allclose(res_2, self.out_2_np) + np.testing.assert_allclose(res_1, self.out_1_np) + np.testing.assert_allclose(res_2, self.out_2_np) # same test between layer and functional in this op. 
def test_static_graph_layer(self): @@ -267,17 +267,17 @@ def test_static_graph_layer(self): feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, - ) + )[0] res_2 = exe.run( fluid.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, - ) + )[0] - assert np.allclose(res_1, out_1_np) - assert np.allclose(res_2, out_2_np) + np.testing.assert_allclose(res_1, out_1_np) + np.testing.assert_allclose(res_2, out_2_np) def run_dygraph(self, down_factor, data_format): '''run_dygraph''' diff --git a/test/legacy_test/test_pool3d_api.py b/test/legacy_test/test_pool3d_api.py index 80f22f1467e27..a9e849fb91d41 100644 --- a/test/legacy_test/test_pool3d_api.py +++ b/test/legacy_test/test_pool3d_api.py @@ -391,7 +391,7 @@ def test_static_fp16_gpu(self): fetch_list=[y], ) - assert np.array_equal(res[0].shape, [1, 2, 1, 16, 16]) + np.testing.assert_array_equal(res[0].shape, [1, 2, 1, 16, 16]) def test_static_bf16_gpu(self): paddle.enable_static() @@ -421,7 +421,7 @@ def test_static_bf16_gpu(self): fetch_list=[y], ) - assert np.array_equal(res[0].shape, [1, 2, 1, 16, 16]) + np.testing.assert_array_equal(res[0].shape, [1, 2, 1, 16, 16]) class TestPool3DError_API(unittest.TestCase): diff --git a/test/legacy_test/test_prune_gate_by_capacity_op.py b/test/legacy_test/test_prune_gate_by_capacity_op.py index e52d67185e374..d6103be8d13d5 100644 --- a/test/legacy_test/test_prune_gate_by_capacity_op.py +++ b/test/legacy_test/test_prune_gate_by_capacity_op.py @@ -62,7 +62,7 @@ def prune_gate_by_capacity(gate_idx, expert_count, n_expert, n_worker): def assert_allclose(output, expected, n_expert): c1 = count(output, n_expert) c2 = count(expected, n_expert) - assert np.allclose(c1, c2) + np.testing.assert_allclose(c1, c2) @unittest.skipIf( diff --git a/test/legacy_test/test_random_routing_op.py b/test/legacy_test/test_random_routing_op.py index e7f0412eecfef..8977e30011044 100644 --- a/test/legacy_test/test_random_routing_op.py +++ b/test/legacy_test/test_random_routing_op.py @@ -59,7 +59,7 @@ def test_api_dygraph(self): value = paddle.to_tensor(self.topk_value) prob = paddle.to_tensor(self.prob) out = utils._random_routing(x, value, prob) - assert np.allclose(out.numpy(), self.out) + np.testing.assert_allclose(out.numpy(), self.out) @unittest.skipIf( diff --git a/test/legacy_test/test_reshape_op.py b/test/legacy_test/test_reshape_op.py index 2feecb5005b14..dc85f407aceab 100755 --- a/test/legacy_test/test_reshape_op.py +++ b/test/legacy_test/test_reshape_op.py @@ -400,10 +400,10 @@ def _test_api(self): fetch_list=[out_1, out_2, out_3, out_4], ) - assert np.array_equal(res_1, input.reshape(shape)) - assert np.array_equal(res_2, input.reshape(shape)) - assert np.array_equal(res_3, input.reshape([5, 10])) - assert np.array_equal(res_4, input.reshape(shape)) + np.testing.assert_array_equal(res_1, input.reshape(shape)) + np.testing.assert_array_equal(res_2, input.reshape(shape)) + np.testing.assert_array_equal(res_3, input.reshape([5, 10])) + np.testing.assert_array_equal(res_4, input.reshape(shape)) def test_paddle_api(self): self._set_paddle_api() @@ -424,9 +424,9 @@ def test_imperative(self): shape_tensor = self.to_tensor(np.array([2, 5, 5]).astype("int32")) out_3 = self.reshape(x, shape=shape_tensor) - assert np.array_equal(out_1.numpy(), input.reshape(shape)) - assert np.array_equal(out_2.numpy(), input.reshape([5, 10])) - assert np.array_equal(out_3.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_1.numpy(), input.reshape(shape)) + 
np.testing.assert_array_equal(out_2.numpy(), input.reshape([5, 10])) + np.testing.assert_array_equal(out_3.numpy(), input.reshape(shape)) class TestStaticReshape_(TestReshapeAPI): @@ -448,9 +448,9 @@ def test_imperative(self): shape_tensor = self.to_tensor(np.array([2, 5, 5]).astype("int32")) out_3 = self.reshape(x, shape=shape_tensor) - assert np.array_equal(out_1.numpy(), input.reshape(shape)) - assert np.array_equal(out_2.numpy(), input.reshape(shape)) - assert np.array_equal(out_3.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_1.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_2.numpy(), input.reshape(shape)) + np.testing.assert_array_equal(out_3.numpy(), input.reshape(shape)) # Test Input Error diff --git a/test/legacy_test/test_size_op.py b/test/legacy_test/test_size_op.py index dfff90b742fca..0bb3ac64bce75 100644 --- a/test/legacy_test/test_size_op.py +++ b/test/legacy_test/test_size_op.py @@ -83,10 +83,10 @@ def test_size_static(self): }, fetch_list=[out_1, out_2], ) - assert np.array_equal( + np.testing.assert_array_equal( res_1, np.array(np.size(input_1)).astype("int64") ) - assert np.array_equal( + np.testing.assert_array_equal( res_2, np.array(np.size(input_2)).astype("int64") ) @@ -98,8 +98,8 @@ def test_size_imperative(self): x_2 = paddle.to_tensor(input_2) out_1 = paddle.numel(x_1) out_2 = paddle.numel(x_2) - assert np.array_equal(out_1.numpy().item(0), np.size(input_1)) - assert np.array_equal(out_2.numpy().item(0), np.size(input_2)) + np.testing.assert_array_equal(out_1.numpy().item(0), np.size(input_1)) + np.testing.assert_array_equal(out_2.numpy().item(0), np.size(input_2)) paddle.enable_static() def test_error(self): diff --git a/test/legacy_test/test_slice_op.py b/test/legacy_test/test_slice_op.py index 9e6ebd6f2a186..ced40c38605a9 100644 --- a/test/legacy_test/test_slice_op.py +++ b/test/legacy_test/test_slice_op.py @@ -631,13 +631,13 @@ def test_1(self): fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], ) - assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_7, input[-1, 0:100, :, 2:-1]) class TestSliceApiWithTensor(unittest.TestCase): diff --git a/test/legacy_test/test_sparse_conv_op.py b/test/legacy_test/test_sparse_conv_op.py index 7ca55d58a6a4b..2d2af3c11fcb6 100644 --- a/test/legacy_test/test_sparse_conv_op.py +++ b/test/legacy_test/test_sparse_conv_op.py @@ -94,7 +94,7 @@ def test_conv3d(self): ) out.backward(out) out = paddle.sparse.coalesce(out) - assert np.array_equal(correct_out_values, out.values().numpy()) + np.testing.assert_array_equal(correct_out_values, out.values().numpy()) def test_subm_conv2d(self): indices = [[0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] @@ -126,7 +126,9 @@ def test_subm_conv3d(self): y = 
paddle.sparse.nn.functional.subm_conv3d( sparse_x, weight, key='subm_conv' ) - assert np.array_equal(sparse_x.indices().numpy(), y.indices().numpy()) + np.testing.assert_array_equal( + sparse_x.indices().numpy(), y.indices().numpy() + ) def test_Conv2D(self): # (3, non_zero_num), 3-D:(N, H, W) @@ -223,7 +225,7 @@ def test_SubmConv3D(self): sparse_out = subm_conv3d(sparse_input) # the output shape of subm_conv is same as input shape - assert np.array_equal(indices, sparse_out.indices().numpy()) + np.testing.assert_array_equal(indices, sparse_out.indices().numpy()) # test errors with self.assertRaises(ValueError): @@ -294,14 +296,16 @@ def test_Conv3D_bias(self): dense_out = sp_out.to_dense() sp_loss = dense_out.mean() sp_loss.backward() - assert np.allclose(out.numpy(), dense_out.numpy(), atol=1e-3, rtol=1e-3) - assert np.allclose( + np.testing.assert_allclose( + out.numpy(), dense_out.numpy(), atol=1e-3, rtol=1e-3 + ) + np.testing.assert_allclose( conv3d.weight.grad.numpy().transpose(2, 3, 4, 1, 0), sp_conv3d.weight.grad.numpy(), atol=1e-3, rtol=1e-3, ) - assert np.allclose( + np.testing.assert_allclose( conv3d.bias.grad.numpy(), sp_conv3d.bias.grad.numpy(), atol=1e-5, diff --git a/test/legacy_test/test_sparse_copy_op.py b/test/legacy_test/test_sparse_copy_op.py index 237b980650676..a97148c703fc5 100644 --- a/test/legacy_test/test_sparse_copy_op.py +++ b/test/legacy_test/test_sparse_copy_op.py @@ -30,7 +30,7 @@ def test_copy_sparse_coo(self): dense_x_2 = paddle.to_tensor(np_x_2, dtype='float32') coo_x_2 = dense_x_2.to_sparse_coo(2) coo_x_2.copy_(coo_x, True) - assert np.array_equal(np_values, coo_x_2.values().numpy()) + np.testing.assert_array_equal(np_values, coo_x_2.values().numpy()) def test_copy_sparse_csr(self): np_x = [[0, 1.0, 0], [2.0, 0, 0], [0, 3.0, 0]] @@ -42,4 +42,4 @@ def test_copy_sparse_csr(self): dense_x_2 = paddle.to_tensor(np_x_2, dtype='float32') csr_x_2 = dense_x_2.to_sparse_csr() csr_x_2.copy_(csr_x, True) - assert np.array_equal(np_values, csr_x_2.values().numpy()) + np.testing.assert_array_equal(np_values, csr_x_2.values().numpy()) diff --git a/test/legacy_test/test_sparse_model.py b/test/legacy_test/test_sparse_model.py index 2b7c646b3ab49..9e71757f90342 100644 --- a/test/legacy_test/test_sparse_model.py +++ b/test/legacy_test/test_sparse_model.py @@ -54,13 +54,19 @@ def test(self): sparse_loss = sparse_out.values().mean() sparse_loss.backward(retain_graph=True) - assert np.allclose(dense_out.numpy(), sparse_out.to_dense().numpy()) - assert np.allclose(x.grad.numpy(), sparse_x.grad.to_dense().numpy()) + np.testing.assert_allclose( + dense_out.numpy(), sparse_out.to_dense().numpy() + ) + np.testing.assert_allclose( + x.grad.numpy(), sparse_x.grad.to_dense().numpy() + ) loss.backward() sparse_loss.backward() - assert np.allclose(x.grad.numpy(), sparse_x.grad.to_dense().numpy()) + np.testing.assert_allclose( + x.grad.numpy(), sparse_x.grad.to_dense().numpy() + ) if __name__ == "__main__": diff --git a/test/legacy_test/test_sparse_norm_op.py b/test/legacy_test/test_sparse_norm_op.py index c17a252ee75a6..ca513ef38aa1f 100644 --- a/test/legacy_test/test_sparse_norm_op.py +++ b/test/legacy_test/test_sparse_norm_op.py @@ -48,7 +48,7 @@ def test(self): sparse_y = sparse_batch_norm(sparse_x) # compare the result with dense batch_norm - assert np.allclose( + np.testing.assert_allclose( dense_y.flatten().numpy(), sparse_y.values().flatten().numpy(), atol=1e-5, @@ -57,7 +57,7 @@ def test(self): # test backward sparse_y.backward(sparse_y) - assert np.allclose( + 
np.testing.assert_allclose( dense_x.grad.flatten().numpy(), sparse_x.grad.values().flatten().numpy(), atol=1e-5, @@ -85,7 +85,9 @@ def test2(self): dense_bn = paddle.nn.BatchNorm1D(channels) dense_x = dense_x.reshape((-1, dense_x.shape[-1])) dense_out = dense_bn(dense_x) - assert np.allclose(dense_out.numpy(), batch_norm_out.values().numpy()) + np.testing.assert_allclose( + dense_out.numpy(), batch_norm_out.values().numpy() + ) # [1, 6, 6, 6, 3] def check(self, shape): @@ -137,7 +139,7 @@ def test_sync_batch_norm(self): dense_sync_bn = paddle.nn.SyncBatchNorm(2) x = x.reshape((-1, x.shape[-1])) dense_hidden = dense_sync_bn(x) - assert np.allclose( + np.testing.assert_allclose( sparse_hidden.values().numpy(), dense_hidden.numpy() ) diff --git a/test/legacy_test/test_sparse_pooling_op.py b/test/legacy_test/test_sparse_pooling_op.py index 1a031329fa584..f8d0cabd304ff 100644 --- a/test/legacy_test/test_sparse_pooling_op.py +++ b/test/legacy_test/test_sparse_pooling_op.py @@ -64,8 +64,10 @@ def test(self): dense_out.backward(dense_out) # compare with dense - assert np.allclose(dense_out.numpy(), out.numpy()) - assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy()) + np.testing.assert_allclose(dense_out.numpy(), out.numpy()) + np.testing.assert_allclose( + dense_x.grad.numpy(), self.dense_x.grad.numpy() + ) class TestStride(TestMaxPool3DFunc): @@ -111,7 +113,7 @@ def test(self): dense_out = paddle.nn.functional.max_pool3d( dense_x, 3, data_format='NDHWC' ) - assert np.allclose(dense_out.numpy(), out.numpy()) + np.testing.assert_allclose(dense_out.numpy(), out.numpy()) if __name__ == "__main__": diff --git a/test/legacy_test/test_sparse_utils_op.py b/test/legacy_test/test_sparse_utils_op.py index 60cf3a7a5208e..2b7583db92175 100644 --- a/test/legacy_test/test_sparse_utils_op.py +++ b/test/legacy_test/test_sparse_utils_op.py @@ -33,17 +33,17 @@ def test_create_coo_by_tensor(self): dense_indices, dense_elements, dense_shape, stop_gradient=False ) # test the to_string.py - assert np.array_equal(indices, coo.indices().numpy()) - assert np.array_equal(values, coo.values().numpy()) + np.testing.assert_array_equal(indices, coo.indices().numpy()) + np.testing.assert_array_equal(values, coo.values().numpy()) def test_create_coo_by_np(self): indices = [[0, 1, 2], [1, 2, 0]] values = [1.0, 2.0, 3.0] dense_shape = [3, 3] coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape) - assert np.array_equal(3, coo.nnz()) - assert np.array_equal(indices, coo.indices().numpy()) - assert np.array_equal(values, coo.values().numpy()) + np.testing.assert_array_equal(3, coo.nnz()) + np.testing.assert_array_equal(indices, coo.indices().numpy()) + np.testing.assert_array_equal(values, coo.values().numpy()) def test_create_csr_by_tensor(self): crows = [0, 2, 3, 5] @@ -69,10 +69,10 @@ def test_create_csr_by_np(self): dense_shape = [3, 4] csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, dense_shape) # test the to_string.py - assert np.array_equal(5, csr.nnz()) - assert np.array_equal(crows, csr.crows().numpy()) - assert np.array_equal(cols, csr.cols().numpy()) - assert np.array_equal(values, csr.values().numpy()) + np.testing.assert_array_equal(5, csr.nnz()) + np.testing.assert_array_equal(crows, csr.crows().numpy()) + np.testing.assert_array_equal(cols, csr.cols().numpy()) + np.testing.assert_array_equal(values, csr.values().numpy()) def test_place(self): place = core.CPUPlace() @@ -132,8 +132,8 @@ def test_to_sparse_coo(self): values = [1.0, 2.0, 3.0, 4.0, 5.0] dense_x = paddle.to_tensor(x, 
dtype='float32', stop_gradient=False) out = dense_x.to_sparse_coo(2) - assert np.array_equal(out.indices().numpy(), indices) - assert np.array_equal(out.values().numpy(), values) + np.testing.assert_array_equal(out.indices().numpy(), indices) + np.testing.assert_array_equal(out.values().numpy(), values) # test to_sparse_coo_grad backward out_grad_indices = [[0, 1], [0, 1]] out_grad_values = [2.0, 3.0] @@ -144,7 +144,9 @@ def test_to_sparse_coo(self): stop_gradient=True, ) out.backward(out_grad) - assert np.array_equal(dense_x.grad.numpy(), out_grad.to_dense().numpy()) + np.testing.assert_array_equal( + dense_x.grad.numpy(), out_grad.to_dense().numpy() + ) def test_coo_to_dense(self): indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]] @@ -168,7 +170,7 @@ def test_coo_to_dense(self): dense_tensor.backward(paddle.to_tensor(out_grad)) # mask the out_grad by sparse_x.indices() correct_x_grad = [2.0, 4.0, 7.0, 9.0, 10.0] - assert np.array_equal( + np.testing.assert_array_equal( correct_x_grad, sparse_x.grad.values().numpy() ) @@ -182,7 +184,7 @@ def test_coo_to_dense(self): sparse_x_cpu.retain_grads() dense_tensor_cpu = sparse_x_cpu.to_dense() dense_tensor_cpu.backward(paddle.to_tensor(out_grad)) - assert np.array_equal( + np.testing.assert_array_equal( correct_x_grad, sparse_x_cpu.grad.values().numpy() ) @@ -193,12 +195,12 @@ def test_to_sparse_csr(self): values = [1, 2, 3, 4, 5] dense_x = paddle.to_tensor(x) out = dense_x.to_sparse_csr() - assert np.array_equal(out.crows().numpy(), crows) - assert np.array_equal(out.cols().numpy(), cols) - assert np.array_equal(out.values().numpy(), values) + np.testing.assert_array_equal(out.crows().numpy(), crows) + np.testing.assert_array_equal(out.cols().numpy(), cols) + np.testing.assert_array_equal(out.values().numpy(), values) dense_tensor = out.to_dense() - assert np.array_equal(dense_tensor.numpy(), x) + np.testing.assert_array_equal(dense_tensor.numpy(), x) def test_coo_values_grad(self): indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]] @@ -214,7 +216,7 @@ def test_coo_values_grad(self): out_grad = [2.0, 3.0, 5.0, 8.0, 9.0] # test coo_values_grad values_tensor.backward(paddle.to_tensor(out_grad)) - assert np.array_equal(out_grad, sparse_x.grad.values().numpy()) + np.testing.assert_array_equal(out_grad, sparse_x.grad.values().numpy()) indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]] values = [ [1.0, 1.0], @@ -240,7 +242,7 @@ def test_coo_values_grad(self): ] # test coo_values_grad values_tensor.backward(paddle.to_tensor(out_grad)) - assert np.array_equal(out_grad, sparse_x.grad.values().numpy()) + np.testing.assert_array_equal(out_grad, sparse_x.grad.values().numpy()) def test_sparse_coo_tensor_grad(self): for device in devices: @@ -266,7 +268,9 @@ def test_sparse_coo_tensor_grad(self): ) sparse_x.backward(sparse_out_grad) correct_values_grad = [0, 3] - assert np.array_equal(correct_values_grad, values.grad.numpy()) + np.testing.assert_array_equal( + correct_values_grad, values.grad.numpy() + ) # test the non-zero values is a vector values = [[1, 1], [2, 2]] @@ -283,7 +287,9 @@ def test_sparse_coo_tensor_grad(self): ) sparse_x.backward(sparse_out_grad) correct_values_grad = [[0, 0], [3, 3]] - assert np.array_equal(correct_values_grad, values.grad.numpy()) + np.testing.assert_array_equal( + correct_values_grad, values.grad.numpy() + ) def test_sparse_coo_tensor_sorted(self): for device in devices: @@ -300,10 +306,12 @@ def test_sparse_coo_tensor_sorted(self): sparse_x = paddle.sparse.coalesce(sparse_x) indices_sorted = [[0, 1], [1, 0]] values_sorted = [5.0, 1.0] - 
assert np.array_equal( + np.testing.assert_array_equal( indices_sorted, sparse_x.indices().numpy() ) - assert np.array_equal(values_sorted, sparse_x.values().numpy()) + np.testing.assert_array_equal( + values_sorted, sparse_x.values().numpy() + ) # test the non-zero values is a vector values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]] @@ -311,16 +319,18 @@ def test_sparse_coo_tensor_sorted(self): sparse_x = paddle.sparse.sparse_coo_tensor(indices, values) sparse_x = paddle.sparse.coalesce(sparse_x) values_sorted = [[5.0, 5.0], [1.0, 1.0]] - assert np.array_equal( + np.testing.assert_array_equal( indices_sorted, sparse_x.indices().numpy() ) - assert np.array_equal(values_sorted, sparse_x.values().numpy()) + np.testing.assert_array_equal( + values_sorted, sparse_x.values().numpy() + ) def test_batch_csr(self): def verify(dense_x): sparse_x = dense_x.to_sparse_csr() out = sparse_x.to_dense() - assert np.allclose(out.numpy(), dense_x.numpy()) + np.testing.assert_allclose(out.numpy(), dense_x.numpy()) shape = np.random.randint(low=1, high=10, size=3) shape = list(shape) diff --git a/test/legacy_test/test_split_op.py b/test/legacy_test/test_split_op.py index b6d45b4e455d8..8f7781f925cea 100644 --- a/test/legacy_test/test_split_op.py +++ b/test/legacy_test/test_split_op.py @@ -323,12 +323,12 @@ def test_api(self): ) out = np.split(input_1, [2, 3], 1) - assert np.array_equal(res_0, out[0]) - assert np.array_equal(res_1, out[1]) - assert np.array_equal(res_2, out[2]) - assert np.array_equal(res_3, out[0]) - assert np.array_equal(res_4, out[1]) - assert np.array_equal(res_5, out[2]) + np.testing.assert_array_equal(res_0, out[0]) + np.testing.assert_array_equal(res_1, out[1]) + np.testing.assert_array_equal(res_2, out[2]) + np.testing.assert_array_equal(res_3, out[0]) + np.testing.assert_array_equal(res_4, out[1]) + np.testing.assert_array_equal(res_5, out[2]) class TestSplitOpError(unittest.TestCase): diff --git a/test/legacy_test/test_strided_slice_op.py b/test/legacy_test/test_strided_slice_op.py index 3181083e6968f..dde0bbf4e5c25 100644 --- a/test/legacy_test/test_strided_slice_op.py +++ b/test/legacy_test/test_strided_slice_op.py @@ -605,13 +605,13 @@ def test_1(self): }, fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], ) - assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_5, input[-3:3, 0:100:2, -1:2:-1, :]) - assert np.array_equal(res_6, input[-3:3, 0:100:2, :, -1:2:-1]) - assert np.array_equal(res_7, input[-1, 0:100:2, :, -1:2:-1]) + np.testing.assert_array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) + np.testing.assert_array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) + np.testing.assert_array_equal(res_5, input[-3:3, 0:100:2, -1:2:-1, :]) + np.testing.assert_array_equal(res_6, input[-3:3, 0:100:2, :, -1:2:-1]) + np.testing.assert_array_equal(res_7, input[-1, 0:100:2, :, -1:2:-1]) def test_dygraph_op(self): x = paddle.zeros(shape=[3, 4, 5, 6], dtype="float32") diff --git a/test/legacy_test/test_tile_op.py b/test/legacy_test/test_tile_op.py index ab8d289aeae03..282a0a52e86e8 100644 --- a/test/legacy_test/test_tile_op.py +++ b/test/legacy_test/test_tile_op.py @@ -387,9 +387,9 @@ def test_api(self): out_2 = paddle.tile(x, 
repeat_times=[positive_2, 3]) out_3 = paddle.tile(x, repeat_times=repeat_times) - assert np.array_equal(out_1.numpy(), np.tile(np_x, (2, 3))) - assert np.array_equal(out_2.numpy(), np.tile(np_x, (2, 3))) - assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_1.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_2.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_3.numpy(), np.tile(np_x, (2, 3))) class TestTileDoubleGradCheck(unittest.TestCase): diff --git a/test/legacy_test/test_unbind_op.py b/test/legacy_test/test_unbind_op.py index 80c3db774a7bd..670433a84c8b5 100644 --- a/test/legacy_test/test_unbind_op.py +++ b/test/legacy_test/test_unbind_op.py @@ -38,8 +38,8 @@ def test_unbind(self): fetch_list=[out_0, out_1], ) - assert np.array_equal(res_1, input_1[0, 0:100]) - assert np.array_equal(res_2, input_1[1, 0:100]) + np.testing.assert_array_equal(res_1, input_1[0, 0:100]) + np.testing.assert_array_equal(res_2, input_1[1, 0:100]) def test_unbind_static_fp16_gpu(self): if paddle.fluid.core.is_compiled_with_cuda(): @@ -61,8 +61,8 @@ def test_unbind_static_fp16_gpu(self): fetch_list=[y], ) - assert np.array_equal(res[0], input[0, :]) - assert np.array_equal(res[1], input[1, :]) + np.testing.assert_array_equal(res[0], input[0, :]) + np.testing.assert_array_equal(res[1], input[1, :]) def test_unbind_dygraph(self): with fluid.dygraph.guard(): @@ -96,8 +96,8 @@ def test_layers_unbind(self): fetch_list=[out_0, out_1], ) - assert np.array_equal(res_1, input_1[0, 0:100]) - assert np.array_equal(res_2, input_1[1, 0:100]) + np.testing.assert_array_equal(res_1, input_1[0, 0:100]) + np.testing.assert_array_equal(res_2, input_1[1, 0:100]) class TestUnbindOp(OpTest): diff --git a/test/legacy_test/test_unsqueeze2_op.py b/test/legacy_test/test_unsqueeze2_op.py index 2ba8d1204b90b..df4115eb0c57a 100755 --- a/test/legacy_test/test_unsqueeze2_op.py +++ b/test/legacy_test/test_unsqueeze2_op.py @@ -275,11 +275,11 @@ def test_api(self): fetch_list=[out_1, out_2, out_3, out_4, out_5], ) - assert np.array_equal(res_1, input.reshape([3, 1, 1, 2, 5, 1])) - assert np.array_equal(res_2, input.reshape([3, 1, 1, 2, 5, 1])) - assert np.array_equal(res_3, input.reshape([3, 1, 1, 2, 5, 1])) - assert np.array_equal(res_4, input.reshape([3, 2, 5, 1])) - assert np.array_equal(res_5, input.reshape([3, 1, 1, 2, 5, 1])) + np.testing.assert_array_equal(res_1, input.reshape([3, 1, 1, 2, 5, 1])) + np.testing.assert_array_equal(res_2, input.reshape([3, 1, 1, 2, 5, 1])) + np.testing.assert_array_equal(res_3, input.reshape([3, 1, 1, 2, 5, 1])) + np.testing.assert_array_equal(res_4, input.reshape([3, 2, 5, 1])) + np.testing.assert_array_equal(res_5, input.reshape([3, 1, 1, 2, 5, 1])) def test_error(self): def test_axes_type(): diff --git a/test/legacy_test/test_update_loss_scaling_op.py b/test/legacy_test/test_update_loss_scaling_op.py index 6060236c5c5c9..56ffc0499699a 100644 --- a/test/legacy_test/test_update_loss_scaling_op.py +++ b/test/legacy_test/test_update_loss_scaling_op.py @@ -274,14 +274,20 @@ def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()): ], ) - assert np.array_equal(result_v[0], a_v) - assert np.array_equal(result_v[1], b_v) - assert np.array_equal(result_v[0], result_v[2]) - assert np.array_equal(result_v[1], result_v[3]) - assert np.array_equal(result_v[4], found_inf_v) - assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio) - assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v)) - assert 
np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) + np.testing.assert_array_equal(result_v[0], a_v) + np.testing.assert_array_equal(result_v[1], b_v) + np.testing.assert_array_equal(result_v[0], result_v[2]) + np.testing.assert_array_equal(result_v[1], result_v[3]) + np.testing.assert_array_equal(result_v[4], found_inf_v) + np.testing.assert_array_equal( + result_v[5], prev_loss_scaling_v * incr_ratio + ) + np.testing.assert_array_equal( + result_v[6], np.zeros_like(num_good_steps_v) + ) + np.testing.assert_array_equal( + result_v[7], np.zeros_like(num_bad_steps_v) + ) def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): with paddle_static_guard(): @@ -353,14 +359,20 @@ def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): num_bad_steps, ], ) - assert np.array_equal(result_v[0], np.zeros_like(a_v)) - assert np.array_equal(result_v[1], np.zeros_like(b_v)) - assert np.array_equal(result_v[2], np.zeros_like(a_v)) - assert np.array_equal(result_v[3], np.zeros_like(b_v)) - assert np.array_equal(result_v[4], found_inf_v) - assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio) - assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v)) - assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) + np.testing.assert_array_equal(result_v[0], np.zeros_like(a_v)) + np.testing.assert_array_equal(result_v[1], np.zeros_like(b_v)) + np.testing.assert_array_equal(result_v[2], np.zeros_like(a_v)) + np.testing.assert_array_equal(result_v[3], np.zeros_like(b_v)) + np.testing.assert_array_equal(result_v[4], found_inf_v) + np.testing.assert_array_equal( + result_v[5], prev_loss_scaling_v * decr_ratio + ) + np.testing.assert_array_equal( + result_v[6], np.zeros_like(num_good_steps_v) + ) + np.testing.assert_array_equal( + result_v[7], np.zeros_like(num_bad_steps_v) + ) def test_loss_scaling_cpu(self): with paddle_static_guard(): diff --git a/test/legacy_test/test_where_op.py b/test/legacy_test/test_where_op.py index b68afe65c3a08..aa03f7276c1b3 100644 --- a/test/legacy_test/test_where_op.py +++ b/test/legacy_test/test_where_op.py @@ -166,17 +166,17 @@ def test_api(self, use_cuda=False): feed={'cond': self.cond, 'x': self.x, 'y': self.y}, fetch_list=fetch_list, ) - assert np.array_equal(out[0], self.out) + np.testing.assert_array_equal(out[0], self.out) if x_stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[2], self.ref_x_backward(out[1]) ) if y.stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[3], self.ref_y_backward(out[1]) ) elif y.stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[2], self.ref_y_backward(out[1]) ) @@ -202,7 +202,9 @@ def test_api_broadcast(self, use_cuda=False): feed={'x': x_i, 'y': y_i}, fetch_list=[result], ) - assert np.array_equal(out[0], np.where((x_i > 1), x_i, y_i)) + np.testing.assert_array_equal( + out[0], np.where((x_i > 1), x_i, y_i) + ) def test_scalar(self): paddle.enable_static() @@ -228,7 +230,7 @@ def test_scalar(self): fetch_list=[result], ) expect = np.where(cond_data, x_data, y_data) - assert np.array_equal(out[0], expect) + np.testing.assert_array_equal(out[0], expect) def __test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape): paddle.enable_static() @@ -262,7 +264,7 @@ def __test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape): fetch_list=[result], ) expect = np.where(cond_data, x_data, y_data) - assert np.array_equal(out[0], expect) + 
np.testing.assert_array_equal(out[0], expect) def test_static_api_broadcast_1(self): cond_shape = [2, 4] @@ -323,7 +325,9 @@ def test_api(self): y = fluid.dygraph.to_variable(y_i) cond = fluid.dygraph.to_variable(cond_i) out = paddle.where(cond, x, y) - assert np.array_equal(out.numpy(), np.where(cond_i, x_i, y_i)) + np.testing.assert_array_equal( + out.numpy(), np.where(cond_i, x_i, y_i) + ) def test_scalar(self): with fluid.dygraph.guard(): @@ -332,7 +336,7 @@ def test_scalar(self): y = 2.0 cond = fluid.dygraph.to_variable(cond_i) out = paddle.where(cond, x, y) - assert np.array_equal(out.numpy(), np.where(cond_i, x, y)) + np.testing.assert_array_equal(out.numpy(), np.where(cond_i, x, y)) def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape): with fluid.dygraph.guard(): diff --git a/test/mkldnn/check_flags_mkldnn_ops_on_off.py b/test/mkldnn/check_flags_mkldnn_ops_on_off.py index 188f70bca1215..f00f3967225ca 100644 --- a/test/mkldnn/check_flags_mkldnn_ops_on_off.py +++ b/test/mkldnn/check_flags_mkldnn_ops_on_off.py @@ -54,7 +54,7 @@ def check(): np_res = np.add(a_np, b_np) np_res = np.matmul(np_res, np.transpose(b_np, (0, 2, 1))) np_res = np.maximum(np_res, 0) - assert np.allclose(res1.numpy(), np_res, atol=1e-3) + np.testing.assert_allclose(res1.numpy(), np_res, atol=1e-3) if __name__ == '__main__': diff --git a/test/mkldnn/check_flags_use_mkldnn.py b/test/mkldnn/check_flags_use_mkldnn.py index 4c9dd32e54443..07b4829743cd6 100644 --- a/test/mkldnn/check_flags_use_mkldnn.py +++ b/test/mkldnn/check_flags_use_mkldnn.py @@ -38,7 +38,7 @@ def check(): a = fluid.dygraph.to_variable(a_np) res1 = func(a) res2 = np.maximum(a_np, 0) - assert np.array_equal(res1.numpy(), res2) + np.testing.assert_array_equal(res1.numpy(), res2) if __name__ == '__main__': diff --git a/test/quantization/test_quant2_int8_mkldnn_pass.py b/test/quantization/test_quant2_int8_mkldnn_pass.py index 61c700d23b7f4..e51da1db81ba8 100644 --- a/test/quantization/test_quant2_int8_mkldnn_pass.py +++ b/test/quantization/test_quant2_int8_mkldnn_pass.py @@ -92,7 +92,7 @@ def test_dequantize_op_weights(self): param.set(self.variables_mul["mul_weights"], self.place) qpass._dequantize_op_weights(graph, op_node, "Y", "Out") - assert np.allclose( + np.testing.assert_allclose( self.scope.find_var("mul_weights").get_tensor(), [ [ diff --git a/test/xpu/process_group_bkcl.py b/test/xpu/process_group_bkcl.py index 9c9b88862feab..e78b0bd3d98da 100644 --- a/test/xpu/process_group_bkcl.py +++ b/test/xpu/process_group_bkcl.py @@ -86,11 +86,11 @@ def test_create_process_group_bkcl(self): # XPU don't support event query by now, so just use sync op here task = dist.broadcast(tensor_x, 0) paddle.device.xpu.synchronize() - assert np.array_equal(broadcast_result, tensor_x) + np.testing.assert_array_equal(broadcast_result, tensor_x) else: task = dist.broadcast(tensor_y, 0) paddle.device.xpu.synchronize() - assert np.array_equal(broadcast_result, tensor_y) + np.testing.assert_array_equal(broadcast_result, tensor_y) sys.stdout.write(f"rank {pg.rank()}: test broadcast api ok\n") @@ -132,8 +132,8 @@ def test_create_process_group_bkcl(self): out_2 = paddle.slice( tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] ) - assert np.array_equal(tensor_x, out_1) - assert np.array_equal(tensor_y, out_2) + np.testing.assert_array_equal(tensor_x, out_1) + np.testing.assert_array_equal(tensor_y, out_2) sys.stdout.write(f"rank {pg.rank()}: test allgather api ok\n") if pg.rank() == 0: @@ -150,8 +150,8 @@ def test_create_process_group_bkcl(self): 
out_2 = paddle.slice( tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] ) - assert np.array_equal(tensor_x, out_1) - assert np.array_equal(tensor_y, out_2) + np.testing.assert_array_equal(tensor_x, out_1) + np.testing.assert_array_equal(tensor_y, out_2) sys.stdout.write(f"rank {pg.rank()}: test allgather api2 ok\n") # test Reduce @@ -171,8 +171,8 @@ def test_create_process_group_bkcl(self): task.wait() paddle.device.xpu.synchronize() if pg.rank() == 0: - assert np.array_equal(tensor_x, sum_result) - assert np.array_equal(tensor_y, old_tensor_y) + np.testing.assert_array_equal(tensor_x, sum_result) + np.testing.assert_array_equal(tensor_y, old_tensor_y) sys.stdout.write(f"rank {pg.rank()}: test reduce sum api ok\n") # test reduce_scatter @@ -196,9 +196,9 @@ def test_create_process_group_bkcl(self): task.wait() paddle.device.xpu.synchronize() if pg.rank() == 0: - assert np.array_equal(need_result0, tensor_out) + np.testing.assert_array_equal(need_result0, tensor_out) else: - assert np.array_equal(need_result1, tensor_out) + np.testing.assert_array_equal(need_result1, tensor_out) sys.stdout.write(f"rank {pg.rank()}: test reduce_scatter sum api ok\n") # test send async api @@ -215,7 +215,7 @@ def test_create_process_group_bkcl(self): else: task = dist.recv(tensor_y, 0, sync_op=False) task.wait() - assert np.array_equal(tensor_y, tensor_x) + np.testing.assert_array_equal(tensor_y, tensor_x) # test send sync api # rank 0 @@ -229,7 +229,7 @@ def test_create_process_group_bkcl(self): task = dist.send(tensor_x, 1, sync_op=True) else: task = dist.recv(tensor_y, 0, sync_op=True) - assert np.array_equal(tensor_y, tensor_x) + np.testing.assert_array_equal(tensor_y, tensor_x) # test send 0-d tensor # rank 0 diff --git a/test/xpu/test_expand_as_v2_op_xpu.py b/test/xpu/test_expand_as_v2_op_xpu.py index 1843748e8ae0c..41f345091054c 100644 --- a/test/xpu/test_expand_as_v2_op_xpu.py +++ b/test/xpu/test_expand_as_v2_op_xpu.py @@ -147,7 +147,7 @@ def test_api(self): feed={"x": x_np, "target_tensor": y_np}, fetch_list=[out_1], ) - assert np.array_equal(res_1[0], np.tile(x_np, (2, 1, 1))) + np.testing.assert_array_equal(res_1[0], np.tile(x_np, (2, 1, 1))) support_types = get_xpu_op_support_types('expand_as_v2') diff --git a/test/xpu/test_expand_v2_op_xpu.py b/test/xpu/test_expand_v2_op_xpu.py index 9d869d14b32e2..ad5397ff3bcb2 100644 --- a/test/xpu/test_expand_v2_op_xpu.py +++ b/test/xpu/test_expand_v2_op_xpu.py @@ -226,9 +226,9 @@ def test_static(self): fetch_list=[out_1, out_2, out_3], ) - assert np.array_equal(res_1, np.tile(input, (1, 1))) - assert np.array_equal(res_2, np.tile(input, (1, 1))) - assert np.array_equal(res_3, np.tile(input, (1, 1))) + np.testing.assert_array_equal(res_1, np.tile(input, (1, 1))) + np.testing.assert_array_equal(res_2, np.tile(input, (1, 1))) + np.testing.assert_array_equal(res_3, np.tile(input, (1, 1))) support_types = get_xpu_op_support_types('expand_v2') diff --git a/test/xpu/test_masked_select_op_xpu.py b/test/xpu/test_masked_select_op_xpu.py index 3c29714a3d3b8..f2ed82cd1e8d7 100644 --- a/test/xpu/test_masked_select_op_xpu.py +++ b/test/xpu/test_masked_select_op_xpu.py @@ -86,7 +86,7 @@ def test_imperative_mode(self): mask = paddle.to_tensor(np_mask) out = paddle.masked_select(x, mask) np_out = np_masked_select(np_x, np_mask) - self.assertEqual(np.allclose(out.numpy(), np_out), True) + np.testing.assert_allclose(out.numpy(), np_out) paddle.enable_static() def test_static_mode(self): diff --git a/test/xpu/test_sparse_utils_op_xpu.py b/test/xpu/test_sparse_utils_op_xpu.py 
index 37e0d39130ffa..5a282563447a6 100644 --- a/test/xpu/test_sparse_utils_op_xpu.py +++ b/test/xpu/test_sparse_utils_op_xpu.py @@ -29,17 +29,17 @@ def test_create_coo_by_tensor(self): coo = paddle.sparse.sparse_coo_tensor( dense_indices, dense_elements, dense_shape, stop_gradient=False ) - assert np.array_equal(indices, coo.indices().numpy()) - assert np.array_equal(values, coo.values().numpy()) + np.testing.assert_array_equal(indices, coo.indices().numpy()) + np.testing.assert_array_equal(values, coo.values().numpy()) def test_create_coo_by_np(self): indices = [[0, 1, 2], [1, 2, 0]] values = [1.0, 2.0, 3.0] dense_shape = [3, 3] coo = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape) - assert np.array_equal(3, coo.nnz()) - assert np.array_equal(indices, coo.indices().numpy()) - assert np.array_equal(values, coo.values().numpy()) + np.testing.assert_array_equal(3, coo.nnz()) + np.testing.assert_array_equal(indices, coo.indices().numpy()) + np.testing.assert_array_equal(values, coo.values().numpy()) def test_place(self): indices = [[0, 1], [0, 1]] diff --git a/test/xpu/test_tile_op_xpu.py b/test/xpu/test_tile_op_xpu.py index 2e661199a0928..ae2e1b2f0d7dd 100644 --- a/test/xpu/test_tile_op_xpu.py +++ b/test/xpu/test_tile_op_xpu.py @@ -219,9 +219,9 @@ def test_api(self): out_2 = paddle.tile(x, repeat_times=[positive_2, 3]) out_3 = paddle.tile(x, repeat_times=repeat_times) - assert np.array_equal(out_1.numpy(), np.tile(np_x, (2, 3))) - assert np.array_equal(out_2.numpy(), np.tile(np_x, (2, 3))) - assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_1.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_2.numpy(), np.tile(np_x, (2, 3))) + np.testing.assert_array_equal(out_3.numpy(), np.tile(np_x, (2, 3))) class TestTileAPI_ZeroDim(unittest.TestCase): diff --git a/test/xpu/test_unbind_op_xpu.py b/test/xpu/test_unbind_op_xpu.py index fa77e80fb6806..3ec10511a7e90 100644 --- a/test/xpu/test_unbind_op_xpu.py +++ b/test/xpu/test_unbind_op_xpu.py @@ -50,8 +50,8 @@ def test_unbind(self): fetch_list=[out_0, out_1], ) - assert np.array_equal(res_1, input_1[0, 0:100]) - assert np.array_equal(res_2, input_1[1, 0:100]) + np.testing.assert_array_equal(res_1, input_1[0, 0:100]) + np.testing.assert_array_equal(res_2, input_1[1, 0:100]) def test_unbind_dygraph(self): with fluid.dygraph.guard(): @@ -89,8 +89,8 @@ def test_layers_unbind(self): fetch_list=[out_0, out_1], ) - assert np.array_equal(res_1, input_1[0, 0:100]) - assert np.array_equal(res_2, input_1[1, 0:100]) + np.testing.assert_array_equal(res_1, input_1[0, 0:100]) + np.testing.assert_array_equal(res_2, input_1[1, 0:100]) class TestUnbindOp(XPUOpTest): def initParameters(self): diff --git a/test/xpu/test_update_loss_scaling_op_xpu.py b/test/xpu/test_update_loss_scaling_op_xpu.py index 86e6aac6badb5..c8e398a3d7782 100644 --- a/test/xpu/test_update_loss_scaling_op_xpu.py +++ b/test/xpu/test_update_loss_scaling_op_xpu.py @@ -174,14 +174,20 @@ def loss_scaling_check(self, scope=fluid.Scope()): num_bad_steps, ], ) - assert np.array_equal(result_v[0], a_v) - assert np.array_equal(result_v[1], b_v) - assert np.array_equal(result_v[0], result_v[2]) - assert np.array_equal(result_v[1], result_v[3]) - assert np.array_equal(result_v[4], found_inf_v) - assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio) - assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v)) - assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) + 
np.testing.assert_array_equal(result_v[0], a_v) + np.testing.assert_array_equal(result_v[1], b_v) + np.testing.assert_array_equal(result_v[0], result_v[2]) + np.testing.assert_array_equal(result_v[1], result_v[3]) + np.testing.assert_array_equal(result_v[4], found_inf_v) + np.testing.assert_array_equal( + result_v[5], prev_loss_scaling_v * incr_ratio + ) + np.testing.assert_array_equal( + result_v[6], np.zeros_like(num_good_steps_v) + ) + np.testing.assert_array_equal( + result_v[7], np.zeros_like(num_bad_steps_v) + ) def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): a = paddle.static.data( @@ -252,14 +258,20 @@ def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): num_bad_steps, ], ) - assert np.array_equal(result_v[0], np.zeros_like(a_v)) - assert np.array_equal(result_v[1], np.zeros_like(b_v)) - assert np.array_equal(result_v[2], np.zeros_like(a_v)) - assert np.array_equal(result_v[3], np.zeros_like(b_v)) - assert np.array_equal(result_v[4], found_inf_v) - assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio) - assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v)) - assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) + np.testing.assert_array_equal(result_v[0], np.zeros_like(a_v)) + np.testing.assert_array_equal(result_v[1], np.zeros_like(b_v)) + np.testing.assert_array_equal(result_v[2], np.zeros_like(a_v)) + np.testing.assert_array_equal(result_v[3], np.zeros_like(b_v)) + np.testing.assert_array_equal(result_v[4], found_inf_v) + np.testing.assert_array_equal( + result_v[5], prev_loss_scaling_v * decr_ratio + ) + np.testing.assert_array_equal( + result_v[6], np.zeros_like(num_good_steps_v) + ) + np.testing.assert_array_equal( + result_v[7], np.zeros_like(num_bad_steps_v) + ) def test_loss_scaling(self): main = fluid.Program() diff --git a/test/xpu/test_where_op_xpu.py b/test/xpu/test_where_op_xpu.py index 8dd7500517aed..13ec8c8c446a7 100644 --- a/test/xpu/test_where_op_xpu.py +++ b/test/xpu/test_where_op_xpu.py @@ -132,18 +132,18 @@ def test_api(self): feed={'cond': self.cond, 'x': self.x, 'y': self.y}, fetch_list=fetch_list, ) - assert np.array_equal(out[0], self.out) + np.testing.assert_array_equal(out[0], self.out) if x_stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[2], self.ref_x_backward(out[1]) ) if y.stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[3], self.ref_y_backward(out[1]) ) elif y.stop_gradient is False: - assert np.array_equal( + np.testing.assert_array_equal( out[2], self.ref_y_backward(out[1]) ) @@ -165,7 +165,7 @@ def test_api_broadcast(self, use_cuda=False): out = exe.run( train_prog, feed={'x': x_i, 'y': y_i}, fetch_list=[result] ) - assert np.array_equal(out[0], np.where(x_i > 1, x_i, y_i)) + np.testing.assert_array_equal(out[0], np.where(x_i > 1, x_i, y_i)) class TestWhereDygraphAPI(unittest.TestCase): @@ -178,7 +178,9 @@ def test_api(self): y = fluid.dygraph.to_variable(y_i) cond = fluid.dygraph.to_variable(cond_i) out = paddle.where(cond, x, y) - assert np.array_equal(out.numpy(), np.where(cond_i, x_i, y_i)) + np.testing.assert_array_equal( + out.numpy(), np.where(cond_i, x_i, y_i) + ) if __name__ == '__main__':
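
Note on tolerances: `np.allclose` and `np.testing.assert_allclose` do not share defaults. `np.allclose` uses rtol=1e-05 and atol=1e-08, while `assert_allclose` uses the stricter rtol=1e-07 and atol=0, so a bare migration can tighten a test. That is why several hunks above (e.g. in test_fused_attention_pass.py and test_fused_feedforward_pass.py) pass rtol=1e-5, atol=1e-8 explicitly. A minimal standalone sketch of the difference (plain NumPy; the arrays here are illustrative, not taken from any Paddle test):

    import numpy as np

    a = np.array([1.0, 2.0, 3.0])
    b = a + 1e-6  # perturbation below np.allclose's default tolerance

    # Passes: np.allclose defaults are rtol=1e-05, atol=1e-08.
    assert np.allclose(a, b)

    # Fails: assert_allclose defaults are rtol=1e-07, atol=0, and unlike a
    # bare assert, the failure message reports the mismatch in detail.
    try:
        np.testing.assert_allclose(a, b)
    except AssertionError as err:
        print(err)  # shows max absolute/relative difference and both arrays

    # Passing the np.allclose defaults explicitly preserves the old behavior.
    np.testing.assert_allclose(a, b, rtol=1e-5, atol=1e-8)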
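A second behavioral difference, which presumably motivates the `)[0]` indexing added in the pixel_shuffle and pixel_unshuffle hunks: `np.allclose` broadcasts its arguments, while the `np.testing` helpers compare shapes strictly. `exe.run` returns a list of arrays, and the old bare asserts silently broadcast that list against the expected array. A small sketch of both behaviors (illustrative values mirroring the test pattern, not from any Paddle test):

    import numpy as np

    expected = np.full([1, 2], 1.1, dtype="float32")
    res = [expected.copy()]  # exe.run-style result: a list holding one array

    # np.allclose broadcasts the (1, 1, 2) list against (1, 2), so this
    # passes even though the shapes differ:
    assert np.allclose(res, expected)

    # assert_allclose checks shapes strictly, hence the added "[0]" indexing:
    np.testing.assert_allclose(res[0], expected)

    # On failure, assert_array_equal pinpoints the mismatch instead of
    # raising a bare AssertionError:
    try:
        np.testing.assert_array_equal(np.arange(3), np.array([0, 1, 3]))
    except AssertionError as err:
        print(err)  # e.g. "Mismatched elements: 1 / 3 (33.3%)" plus arrays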