
Commit df0f884

whisky-12 authored and SigureMo committed
[xdoctest][task 171-180] reformat example code with google style in audio/* (PaddlePaddle#57111)
* [Doctest]fix No.171-180, test=docs_preview
* [Doctest]fix No.171-180, test=docs_preview
* fix python/paddle/amp/auto_cast.py
* fix python/paddle/amp/debugging.py
* fix python/paddle/amp/grad_scaler.py
* fix python/paddle/device/__init__.py
* fix python/paddle/profiler/utils.py
* fix python/paddle/sparse/binary.py
* fix python/paddle/sparse/creation.py
* fix ci error;
* remove prompt before output
* skip some output

---------

Co-authored-by: SigureMo <[email protected]>
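In short, the commit moves these docstrings from passive code blocks (with `# required: gpu` markers and expected values buried in comments such as `# FP16`) to executable xdoctest examples: each statement carries a `>>> `/`... ` prompt, expected values appear as printed output lines, and GPU-only snippets are gated with `# doctest: +REQUIRES(env:GPU)`. A minimal before/after sketch of the style; the `scale` helper below is hypothetical, not from this commit:

    # Before: never executed by CI; the expected dtype lives in a comment.
    #     .. code-block:: python
    #
    #         import paddle
    #         x = paddle.ones([2])
    #         print(x.dtype)  # FP32

    # After: xdoctest runs each `>>>` line and diffs the captured stdout
    # against the expected-output line that follows.
    def scale(x):
        """Multiply a tensor by 2 (hypothetical helper, for illustration).

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> x = paddle.ones([2])
                >>> print(scale(x).dtype)
                paddle.float32
        """
        return 2 * x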
1 parent d25bcb2 commit df0f884

File tree

9 files changed: +1192 −1055 lines changed


Diff for: python/paddle/amp/auto_cast.py

+105 −78
@@ -298,21 +298,27 @@ def amp_guard(

    Examples:

-     .. code-block:: python
-
-        import numpy as np
-        import paddle
+        .. code-block:: python

-        data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
-        conv2d = paddle.nn.Conv2D(3, 2, 3)
-        data = paddle.to_tensor(data)
-        with paddle.amp.amp_guard():
-            conv = conv2d(data)
-            print(conv.dtype) # FP16
-        with paddle.amp.amp_guard(enable=False):
-            conv = conv2d(data)
-            print(conv.dtype) # FP32
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle

+            >>> data = paddle.uniform([10, 3, 32, 32], paddle.float32, -1, 1)
+            >>> conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> conv2d = paddle.amp.amp_decorate(models=conv2d, level='O2')
+            >>> with paddle.amp.amp_guard():
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float16
+            >>> # doctest: -SKIP
+            ...
+            >>> with paddle.amp.amp_guard(enable=False):
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float32
+            >>> # doctest: -SKIP
    """
    amp_state = locals()
    global _g_amp_state_
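Two xdoctest directives do the heavy lifting in the hunk above: `+REQUIRES(env:GPU)` skips the whole example unless the GPU environment flag is set, and the `+SKIP`/`-SKIP` pair disables output checking only for the expected-output lines between them (dtypes that can differ under the xdoctest environment). A sketch of checking this module's examples locally, assuming xdoctest and paddle are installed:

    # Run every docstring example found in one module with xdoctest.
    # On a CPU-only machine the GPU-gated examples above are reported
    # as skipped, not failed.
    import xdoctest

    xdoctest.doctest_module('paddle.amp.auto_cast', command='all')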
@@ -515,50 +521,53 @@ def amp_decorate(

    Examples:

-     .. code-block:: python
+        .. code-block:: python

-        # required: gpu
-        # Demo1: single model and optimizer:
-        import paddle
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> # Demo1: single model and optimizer:
+            >>> import paddle
+            >>> paddle.device.set_device('gpu')

-        model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-        optimizer = paddle.optimizer.SGD(parameters=model.parameters())
+            >>> model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer = paddle.optimizer.SGD(parameters=model.parameters())

-        model, optimizer = paddle.amp.amp_decorate(models=model, optimizers=optimizer, level='O2')
+            >>> model, optimizer = paddle.amp.amp_decorate(models=model, optimizers=optimizer, level='O2')

-        data = paddle.rand([10, 3, 32, 32])
+            >>> data = paddle.rand([10, 3, 32, 32])

-        with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-            output = model(data)
-            print(output.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = model(data)
+            ...     print(output.dtype)
+            paddle.float16

-        # required: gpu
-        # Demo2: multi models and optimizers:
-        model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-        optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())
+            >>> # Demo2: multi models and optimizers:
+            >>> model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())

-        models, optimizers = paddle.amp.amp_decorate(models=[model, model2], optimizers=[optimizer, optimizer2], level='O2')
+            >>> models, optimizers = paddle.amp.amp_decorate(models=[model, model2], optimizers=[optimizer, optimizer2], level='O2')

-        data = paddle.rand([10, 3, 32, 32])
+            >>> data = paddle.rand([10, 3, 32, 32])

-        with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-            output = models[0](data)
-            output2 = models[1](data)
-            print(output.dtype) # FP16
-            print(output2.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = models[0](data)
+            ...     output2 = models[1](data)
+            ...     print(output.dtype)
+            ...     print(output2.dtype)
+            paddle.float16
+            paddle.float16

-        # required: gpu
-        # Demo3: optimizers is None:
-        model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-        optimizer3 = paddle.optimizer.Adam(parameters=model2.parameters())
+            >>> # Demo3: optimizers is None:
+            >>> model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer3 = paddle.optimizer.Adam(parameters=model2.parameters())

-        model = paddle.amp.amp_decorate(models=model3, level='O2')
+            >>> model = paddle.amp.amp_decorate(models=model3, level='O2')

-        data = paddle.rand([10, 3, 32, 32])
+            >>> data = paddle.rand([10, 3, 32, 32])

-        with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-            output = model(data)
-            print(output.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = model(data)
+            ...     print(output.dtype)
+            paddle.float16
    """
    if not (level in ['O1', 'O2']):
        raise ValueError(
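For context on how the decorated model above is used beyond a single forward pass: a rough training-step sketch combining `amp_decorate`/`amp_guard` with loss scaling. The `GradScaler` settings here are illustrative assumptions, not part of this commit:

    import paddle

    # O2 decoration casts the conv weights to float16 up front.
    model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
    optimizer = paddle.optimizer.SGD(parameters=model.parameters())
    model, optimizer = paddle.amp.amp_decorate(models=model, optimizers=optimizer, level='O2')
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

    data = paddle.rand([10, 3, 32, 32])
    with paddle.amp.amp_guard(level='O2'):
        loss = model(data).mean()

    scaled = scaler.scale(loss)          # scale the loss to avoid fp16 gradient underflow
    scaled.backward()
    scaler.minimize(optimizer, scaled)   # unscales gradients, then steps the optimizer
    optimizer.clear_grad()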
@@ -717,34 +726,50 @@ def auto_cast(

    Examples:

-     .. code-block:: python
-
-        import paddle
-
-        conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-        data = paddle.rand([10, 3, 32, 32])
-
-        with paddle.amp.auto_cast():
-            conv = conv2d(data)
-            print(conv.dtype) # paddle.float16
-
-        with paddle.amp.auto_cast(enable=False):
-            conv = conv2d(data)
-            print(conv.dtype) # paddle.float32
+        .. code-block:: python

-        with paddle.amp.auto_cast(custom_black_list={'conv2d'}):
-            conv = conv2d(data)
-            print(conv.dtype) # paddle.float32
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle

-        a = paddle.rand([2,3])
-        b = paddle.rand([2,3])
-        with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}):
-            c = a + b
-            print(c.dtype) # paddle.float16
+            >>> conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> data = paddle.rand([10, 3, 32, 32])

-        with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O2'):
-            d = a + b
-            print(d.dtype) # paddle.float16
+            >>> with paddle.amp.auto_cast():
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float16
+            >>> # doctest: -SKIP
+
+            >>> with paddle.amp.auto_cast(enable=False):
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float32
+            >>> # doctest: -SKIP
+
+            >>> with paddle.amp.auto_cast(custom_black_list={'conv2d'}):
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float32
+            >>> # doctest: -SKIP
+
+            >>> a = paddle.rand([2, 3])
+            >>> b = paddle.rand([2, 3])
+            >>> with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}):
+            ...     c = a + b
+            ...     print(c.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float16
+            >>> # doctest: -SKIP
+
+            >>> with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O2'):
+            ...     d = a + b
+            ...     print(d.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float16
+            >>> # doctest: -SKIP

    """
    return amp_guard(
@@ -800,9 +825,9 @@ def decorate(

            >>> with paddle.amp.auto_cast(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
            ...     output = model(data)
-            ...     assert output.dtype == paddle.float16
+            ...     print(output.dtype)
+            paddle.float16

-            >>> # doctest: +REQUIRES(env:GPU)
            >>> # Demo2: multi models and optimizers:
            >>> model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
            >>> optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())
@@ -812,12 +837,13 @@ def decorate(
            >>> data = paddle.rand([10, 3, 32, 32])

            >>> with paddle.amp.auto_cast(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-            ...     output = models[0](data)
-            ...     output2 = models[1](data)
-            ...     assert output.dtype == paddle.float16
-            ...     assert output2.dtype == paddle.float16
+            ...     output = models[0](data)
+            ...     output2 = models[1](data)
+            ...     print(output.dtype)
+            ...     print(output2.dtype)
+            paddle.float16
+            paddle.float16

-            >>> # doctest: +REQUIRES(env:GPU)
            >>> # Demo3: optimizers is None:
            >>> model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
            >>> optimizer3 = paddle.optimizer.Adam(parameters=model3.parameters())
@@ -827,8 +853,9 @@ def decorate(
            >>> data = paddle.rand([10, 3, 32, 32])

            >>> with paddle.amp.auto_cast(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-            ...     output = model(data)
-            ...     assert output.dtype == paddle.float16
+            ...     output = model(data)
+            ...     print(output.dtype)
+            paddle.float16

    """
    return amp_decorate(
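The last three hunks all make the same change: `assert output.dtype == paddle.float16` becomes a `print` plus an expected-output line. The point is how doctest-style runners report failures: stdout is captured and diffed against the expected text, so a mismatch shows the actual dtype rather than a bare AssertionError. A toy illustration with a hypothetical function:

    def double(x):
        """
        Examples:
            .. code-block:: python

                >>> print(double(2))
                4
        """
        # The runner executes the >>> line and compares the captured
        # output ("4") with the expected line below it.
        return 2 * x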
