
Commit e8dd660

aztice authored and SigureMo committed
【UnitTestFix No.4】Fix unittest test_dropout_op (PaddlePaddle#75729)
* fix: using latest API
* switch check_prim_pir ON
* fix: Code Style Issue
* remove: useless whitelist.
* fix: code-style issue.
* Update test/legacy_test/test_dropout_op.py
  Co-authored-by: Nyakku Shigure <[email protected]>
* fix: code-style issue.

---------

Co-authored-by: Nyakku Shigure <[email protected]>
1 parent b16d269 commit e8dd660

File tree: 1 file changed (+40, -51 lines)


test/legacy_test/test_dropout_op.py

Lines changed: 40 additions & 51 deletions
@@ -34,7 +34,6 @@
 from paddle.base import Program, Scope, core, program_guard
 from paddle.base.executor import scope_guard
 from paddle.decomposition import decompose
-from paddle.incubate.autograd import primapi


 def dropout_wrapper(
@@ -478,71 +477,63 @@ def test_seed_cpu_place(self):
         paddle.enable_static()
         main_program = Program()
         with program_guard(main_program):
+            paddle.seed(1)
             seed_input_name = "tensor@SeedInput"
             x_var_name = "tensor@X"
             x_out_var = "tensor@XOut"

             mask_var_name = "tensor@Mask"
-            seed_input_var = main_program.global_block().create_var(
+            seed_input_var = paddle.static.data(
                 name=seed_input_name,
                 shape=[1],
                 dtype='int32',
-                persistable=False,
-                stop_gradient=True,
             )
-            x_out_var = main_program.global_block().create_var(
+            seed_input_var.persistable = False
+            seed_input_var.stop_gradient = True
+            x_out_var = paddle.static.data(
                 name=x_out_var,
                 shape=[40, 40],
                 dtype='float32',
-                persistable=False,
-                stop_gradient=True,
             )
-            x_var = main_program.global_block().create_var(
+            x_out_var.persistable = False
+            x_out_var.stop_gradient = True
+            x_var = paddle.static.data(
                 name=x_var_name,
                 shape=[40, 40],
                 dtype='float32',
-                persistable=False,
-                stop_gradient=True,
             )
-            mask_var = main_program.global_block().create_var(
+            x_var.persistable = False
+            x_var.stop_gradient = True
+            mask_var = paddle.static.data(
                 name=mask_var_name,
                 shape=[1],
                 dtype='int',
-                persistable=False,
-                stop_gradient=True,
             )
+            mask_var.persistable = False
+            mask_var.stop_gradient = True

-            main_program.global_block().append_op(
-                type="fill_constant",
-                outputs={"Out": x_var_name},
-                attrs={
-                    "shape": [40, 40],
-                    "dtype": x_var.dtype,
-                    "value": 1.0,
-                    "place_type": 0,
-                },
-            )
-            main_program.global_block().append_op(
-                type='seed',
-                inputs={},
-                outputs={'Out': seed_input_var},
-                attrs={'seed': 1, 'force_cpu': True},
-            )
-            main_program.global_block().append_op(
-                type='dropout',
-                inputs={'X': x_var, 'Seed': seed_input_var},
-                attrs={'dropout_prob': 0.0},
-                outputs={'Out': x_out_var, 'Mask': mask_var},
+            x_var = paddle.full(shape=[40, 40], dtype='float32', fill_value=1.0)
+            x_out_var = paddle.static.data(
+                name='x_out', shape=[40, 40], dtype='float32'
             )
+            x_out_var.persistable = True
+            tmp = paddle.nn.functional.dropout(x_var, p=0.0, training=False)
+            paddle.assign(tmp, output=x_out_var)
+
         place = base.CPUPlace()
         if core.is_compiled_with_cuda() or is_custom_device():
             place = get_device_place()
         exe = base.Executor(place)
-        x_out, mask_out = exe.run(
+        x_out = exe.run(
            main_program,
-            feed={},
-            fetch_list=[x_out_var.name, mask_var.name],
-        )
+            feed={
+                'tensor@X': np.ones([40, 40], dtype=np.float32),
+                'tensor@XOut': np.ones([40, 40], dtype=np.float32),
+                'tensor@SeedInput': np.array([123], dtype=np.int32),
+                'tensor@Mask': np.array([123], dtype=np.int64),
+            },
+            fetch_list=[x_out_var],
+        )[0]
         x_in_np = np.ones([40, 40]).astype("float32")
         np.testing.assert_allclose(x_out, x_in_np, rtol=1e-05)
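For context, here is a minimal standalone sketch of the pattern the rewritten test now relies on: declare inputs with paddle.static.data, express dropout through paddle.nn.functional.dropout, and feed/fetch by name through an executor. The variable names and shapes below are illustrative, not taken from the test.

    # Minimal sketch (names and shapes are illustrative): run dropout with p=0.0
    # in a static program and check that the output equals the input.
    import numpy as np
    import paddle

    paddle.enable_static()
    main_program = paddle.static.Program()
    with paddle.static.program_guard(main_program):
        paddle.seed(1)
        x = paddle.static.data(name="x", shape=[40, 40], dtype="float32")
        out = paddle.nn.functional.dropout(x, p=0.0, training=False)

    exe = paddle.static.Executor(paddle.CPUPlace())
    x_np = np.ones([40, 40], dtype=np.float32)
    (out_np,) = exe.run(main_program, feed={"x": x_np}, fetch_list=[out])
    np.testing.assert_allclose(out_np, x_np, rtol=1e-05)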

@@ -1423,27 +1414,23 @@ def setUp(self):
         self.places = get_places()

     def check_static_result(self, place):
-        from paddle.distributed.fleet.meta_parallel.parallel_layers.random import (
-            dropout,
-        )
-
         with static.program_guard(static.Program(), static.Program()):
+            paddle.seed(0)
             input = static.data(name="input", shape=[40, 40], dtype="float32")
-            res1 = dropout(
+            res1 = paddle.nn.functional.dropout(
                 input,
                 p=0.3,
                 training=True,
                 mode='upscale_in_train',
-                rng_name='seed0',
             )
-            res2 = dropout(
+
+            res2 = paddle.nn.functional.dropout(
                 input,
                 p=0.3,
                 training=True,
                 mode='upscale_in_train',
-                rng_name='seed1',
             )
-            res3 = dropout(input, p=0.3)
+            res3 = paddle.nn.functional.dropout(input, p=0.3)

             in_np = np.random.random([40, 40]).astype("float32")
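As a quick illustration of the replacement API (a dynamic-graph sketch, not part of the test): paddle.nn.functional.dropout with mode='upscale_in_train' covers what the fleet-internal dropout with rng_name was previously used for, and determinism now comes from paddle.seed.

    # Dynamic-graph sketch: public functional dropout in upscale_in_train mode.
    import paddle

    paddle.seed(0)
    x = paddle.ones([4, 4], dtype="float32")
    y = paddle.nn.functional.dropout(x, p=0.3, training=True, mode="upscale_in_train")
    # Kept elements are scaled by 1 / (1 - p); dropped elements become 0.
    print(y.numpy())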

@@ -1489,8 +1476,8 @@ def init_info(self):
         self.api = paddle.nn.functional.dropout

     def api_case(self, x):
-        p = paddle.assign([0.5])
-        out = self.api(x=x, p=p, training=True)
+        p = 0.5
+        out = self.api(x, p, training=True)
         return out

     def run_static(self, x):
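The api_case change passes p as a plain Python float instead of a Tensor built with paddle.assign([0.5]). A minimal sketch of that call (the input x here is just an example):

    # Sketch: dropout probability passed as a Python float rather than a Tensor.
    import paddle

    x = paddle.rand([8, 8])
    out = paddle.nn.functional.dropout(x, 0.5, training=True)
    print(out.shape)  # [8, 8]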
@@ -1859,7 +1846,8 @@ def test_static_comp(self):
                     mode=self.mode,
                 )
                 if core._is_fwd_prim_enabled():
-                    primapi.to_prim(mp.blocks)
+                    # primapi.to_prim(mp.blocks)
+                    [output] = decompose(mp, [output])
                 grad = paddle.static.gradients(output, input_)[0]
                 if self.dtype == "bfloat16":
                     output = paddle.cast(output, "float32")
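The primapi.to_prim call is replaced by paddle.decomposition.decompose, which lowers composite ops into primitives and returns new handles for the requested outputs. A hedged sketch of that usage follows, assuming static mode runs on the PIR IR (the default in recent Paddle) and that forward prim lowering is toggled via core._set_prim_forward_enabled; neither assumption comes from this diff.

    # Sketch (assumes PIR static graph and prim forward enabled): decompose the
    # dropout composite op and keep a handle to the decomposed output value.
    import paddle
    from paddle.base import core
    from paddle.decomposition import decompose

    paddle.enable_static()
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        x = paddle.static.data(name="x", shape=[4, 4], dtype="float32")
        out = paddle.nn.functional.dropout(x, p=0.5, training=True)
        core._set_prim_forward_enabled(True)
        if core._is_fwd_prim_enabled():
            [out] = decompose(main, [out])  # mirrors the updated test
        core._set_prim_forward_enabled(False)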
@@ -1874,7 +1862,8 @@
             mps.append(mp)
         for i in range(len(self.places)):
             self.assertTrue(
-                'dropout' not in [op.type for op in mps[i].block(0).ops]
+                'pd_op.dropout'
+                not in [op.name() for op in mps[i].global_block().ops]
             )
             np.testing.assert_allclose(
                 self.fwd_desire[i].sum(),
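The assertion now inspects the PIR program: ops are listed via program.global_block().ops and identified by op.name() (e.g. 'pd_op.dropout'), instead of op.type on block(0).ops in the old IR. A small sketch of that inspection, assuming PIR is the active static IR:

    # Sketch (assumes PIR static graph): list op names and look for pd_op.dropout.
    import paddle

    paddle.enable_static()
    prog = paddle.static.Program()
    with paddle.static.program_guard(prog):
        x = paddle.static.data(name="x", shape=[4, 4], dtype="float32")
        y = paddle.nn.functional.dropout(x, p=0.5)

    op_names = [op.name() for op in prog.global_block().ops]
    print('pd_op.dropout' in op_names)  # expected True before decomposition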

0 commit comments
