@@ -298,21 +298,27 @@ def amp_guard(
 
     Examples:
 
-        .. code-block:: python
-
-            import numpy as np
-            import paddle
-
-            data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
-            conv2d = paddle.nn.Conv2D(3, 2, 3)
-            data = paddle.to_tensor(data)
-            with paddle.amp.amp_guard():
-                conv = conv2d(data)
-                print(conv.dtype) # FP16
-            with paddle.amp.amp_guard(enable=False):
-                conv = conv2d(data)
-                print(conv.dtype) # FP32
+        .. code-block:: python
+
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+
+            >>> data = paddle.uniform([10, 3, 32, 32], paddle.float32, -1, 1)
+            >>> conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> conv2d = paddle.amp.amp_decorate(models=conv2d, level='O2')
+            >>> with paddle.amp.amp_guard():
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float16
+            >>> # doctest: -SKIP
+            ...
+            >>> with paddle.amp.amp_guard(enable=False):
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float32
+            >>> # doctest: -SKIP
 
     """
     amp_state = locals()
     global _g_amp_state_
@@ -515,50 +521,53 @@ def amp_decorate(
 
     Examples:
 
-        .. code-block:: python
+        .. code-block:: python
 
-            # required: gpu
-            # Demo1: single model and optimizer:
-            import paddle
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> # Demo1: single model and optimizer:
+            >>> import paddle
+            >>> paddle.device.set_device('gpu')
 
-            model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-            optimizer = paddle.optimizer.SGD(parameters=model.parameters())
+            >>> model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer = paddle.optimizer.SGD(parameters=model.parameters())
 
-            model, optimizer = paddle.amp.amp_decorate(models=model, optimizers=optimizer, level='O2')
+            >>> model, optimizer = paddle.amp.amp_decorate(models=model, optimizers=optimizer, level='O2')
 
-            data = paddle.rand([10, 3, 32, 32])
+            >>> data = paddle.rand([10, 3, 32, 32])
 
-            with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-                output = model(data)
-                print(output.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = model(data)
+            ...     print(output.dtype)
+            paddle.float16
 
-            # required: gpu
-            # Demo2: multi models and optimizers:
-            model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-            optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())
+            >>> # Demo2: multi models and optimizers:
+            >>> model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())
 
-            models, optimizers = paddle.amp.amp_decorate(models=[model, model2], optimizers=[optimizer, optimizer2], level='O2')
+            >>> models, optimizers = paddle.amp.amp_decorate(models=[model, model2], optimizers=[optimizer, optimizer2], level='O2')
 
-            data = paddle.rand([10, 3, 32, 32])
+            >>> data = paddle.rand([10, 3, 32, 32])
 
-            with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-                output = models[0](data)
-                output2 = models[1](data)
-                print(output.dtype) # FP16
-                print(output2.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = models[0](data)
+            ...     output2 = models[1](data)
+            ...     print(output.dtype)
+            ...     print(output2.dtype)
+            paddle.float16
+            paddle.float16
 
-            # required: gpu
-            # Demo3: optimizers is None:
-            model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-            optimizer3 = paddle.optimizer.Adam(parameters=model2.parameters())
+            >>> # Demo3: optimizers is None:
+            >>> model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer3 = paddle.optimizer.Adam(parameters=model3.parameters())
 
-            model = paddle.amp.amp_decorate(models=model3, level='O2')
+            >>> model = paddle.amp.amp_decorate(models=model3, level='O2')
 
-            data = paddle.rand([10, 3, 32, 32])
+            >>> data = paddle.rand([10, 3, 32, 32])
 
-            with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-                output = model(data)
-                print(output.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = model(data)
+            ...     print(output.dtype)
+            paddle.float16
 
     """
     if not (level in ['O1', 'O2']):
         raise ValueError(
@@ -717,34 +726,50 @@ def auto_cast(
 
     Examples:
 
-        .. code-block:: python
-
-            import paddle
-
-            conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-            data = paddle.rand([10, 3, 32, 32])
-
-            with paddle.amp.auto_cast():
-                conv = conv2d(data)
-                print(conv.dtype) # paddle.float16
-
-            with paddle.amp.auto_cast(enable=False):
-                conv = conv2d(data)
-                print(conv.dtype) # paddle.float32
-
-            with paddle.amp.auto_cast(custom_black_list={'conv2d'}):
-                conv = conv2d(data)
-                print(conv.dtype) # paddle.float32
-
-            a = paddle.rand([2,3])
-            b = paddle.rand([2,3])
-            with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}):
-                c = a + b
-                print(c.dtype) # paddle.float16
-
-            with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O2'):
-                d = a + b
-                print(d.dtype) # paddle.float16
+        .. code-block:: python
+
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+
+            >>> conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> data = paddle.rand([10, 3, 32, 32])
+
+            >>> with paddle.amp.auto_cast():
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float16
+            >>> # doctest: -SKIP
+
+            >>> with paddle.amp.auto_cast(enable=False):
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float32
+            >>> # doctest: -SKIP
+
+            >>> with paddle.amp.auto_cast(custom_black_list={'conv2d'}):
+            ...     conv = conv2d(data)
+            ...     print(conv.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float32
+            >>> # doctest: -SKIP
+
+            >>> a = paddle.rand([2, 3])
+            >>> b = paddle.rand([2, 3])
+            >>> with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}):
+            ...     c = a + b
+            ...     print(c.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float16
+            >>> # doctest: -SKIP
+
+            >>> with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O2'):
+            ...     d = a + b
+            ...     print(d.dtype)
+            >>> # doctest: +SKIP("This has diff in xdoctest env")
+            paddle.float16
+            >>> # doctest: -SKIP
 
     """
     return amp_guard(
@@ -800,9 +825,9 @@ def decorate(
 
             >>> with paddle.amp.auto_cast(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
             ...     output = model(data)
-            ...     assert output.dtype == paddle.float16
+            ...     print(output.dtype)
+            paddle.float16
 
-            >>> # doctest: +REQUIRES(env:GPU)
             >>> # Demo2: multi models and optimizers:
             >>> model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             >>> optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())
@@ -812,12 +837,13 @@ def decorate(
             >>> data = paddle.rand([10, 3, 32, 32])
 
             >>> with paddle.amp.auto_cast(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-            ...     output = models[0](data)
-            ...     output2 = models[1](data)
-            ...     assert output.dtype == paddle.float16
-            ...     assert output2.dtype == paddle.float16
+            ...     output = models[0](data)
+            ...     output2 = models[1](data)
+            ...     print(output.dtype)
+            ...     print(output2.dtype)
+            paddle.float16
+            paddle.float16
 
-            >>> # doctest: +REQUIRES(env:GPU)
             >>> # Demo3: optimizers is None:
             >>> model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             >>> optimizer3 = paddle.optimizer.Adam(parameters=model3.parameters())
@@ -827,8 +853,9 @@ def decorate(
             >>> data = paddle.rand([10, 3, 32, 32])
 
             >>> with paddle.amp.auto_cast(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-            ...     output = model(data)
-            ...     assert output.dtype == paddle.float16
+            ...     output = model(data)
+            ...     print(output.dtype)
+            paddle.float16
 
     """
     return amp_decorate(
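
Taken together, the migrated doctests describe one end-to-end AMP recipe: decorate the model (and optionally its optimizer) for O2, then run the forward pass under an amp guard. As a quick cross-check of the examples above, here is a minimal standalone sketch of that recipe, using only API calls that appear in these docstrings and assuming a GPU build of Paddle:

    import paddle

    # The O2 examples above require a GPU device.
    paddle.device.set_device('gpu')

    model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
    optimizer = paddle.optimizer.SGD(parameters=model.parameters())

    # O2 decoration casts the model's parameters so the forward pass runs in float16.
    model, optimizer = paddle.amp.amp_decorate(models=model, optimizers=optimizer, level='O2')

    data = paddle.rand([10, 3, 32, 32])
    with paddle.amp.amp_guard(enable=True, level='O2'):
        output = model(data)
    print(output.dtype)  # paddle.float16 on GPU, matching the expected doctest output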