[Doctest]fix No.171-180, test=docs_preview
whisky-12 committed Sep 8, 2023
1 parent e98875b commit 33e1d98
Showing 9 changed files with 1,132 additions and 1,041 deletions.
134 changes: 70 additions & 64 deletions python/paddle/amp/auto_cast.py
@@ -298,20 +298,21 @@ def amp_guard(
     Examples:

      .. code-block:: python

-            import numpy as np
-            import paddle
+            >>> import numpy as np
+            >>> import paddle

-            data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
-            conv2d = paddle.nn.Conv2D(3, 2, 3)
-            data = paddle.to_tensor(data)
-            with paddle.amp.amp_guard():
-                conv = conv2d(data)
-                print(conv.dtype) # FP16
-            with paddle.amp.amp_guard(enable=False):
-                conv = conv2d(data)
-                print(conv.dtype) # FP32
+            >>> paddle.seed(2023)
+            >>> data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
+            >>> conv2d = paddle.nn.Conv2D(3, 2, 3)
+            >>> data = paddle.to_tensor(data)
+            >>> with paddle.amp.amp_guard():
+            ...     conv = conv2d(data)
+            ...     assert conv.dtype == paddle.float16
+            >>> with paddle.amp.amp_guard(enable=False):
+            ...     conv = conv2d(data)
+            ...     assert conv.dtype == paddle.float32
"""
amp_state = locals()
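For reference, the converted example above only asserts the output dtype. Under the default level='O1', the guard does not touch tensors you create yourself: inputs stay float32, and only operators on the float16 white list (such as conv2d) run in float16. A minimal sketch of that behaviour, not taken from this commit and assuming a CUDA device:

    import paddle

    x = paddle.rand([1, 3, 8, 8])          # created as float32
    conv2d = paddle.nn.Conv2D(3, 2, 3)
    with paddle.amp.amp_guard():           # level='O1' by default
        y = conv2d(x)                      # conv2d is white-listed, so it runs in float16
    print(x.dtype)                         # paddle.float32 - the input tensor is unchanged
    print(y.dtype)                         # paddle.float16 - only the op output is cast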
@@ -515,50 +516,54 @@ def amp_decorate(
     Examples:

      .. code-block:: python

-            # required: gpu
-            # Demo1: single model and optimizer:
-            import paddle
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> # Demo1: single model and optimizer:
+            >>> import paddle
+            >>> paddle.device.set_device('gpu')

-            model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-            optimizer = paddle.optimizer.SGD(parameters=model.parameters())
+            >>> model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer = paddle.optimizer.SGD(parameters=model.parameters())

-            model, optimizer = paddle.amp.amp_decorate(models=model, optimizers=optimizer, level='O2')
+            >>> model, optimizer = paddle.amp.amp_decorate(models=model, optimizers=optimizer, level='O2')

-            data = paddle.rand([10, 3, 32, 32])
+            >>> paddle.seed(2023)
+            >>> data = paddle.rand([10, 3, 32, 32])

-            with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-                output = model(data)
-                print(output.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = model(data)
+            ...     assert output.dtype == paddle.float16

-            # required: gpu
-            # Demo2: multi models and optimizers:
-            model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-            optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> # Demo2: multi models and optimizers:
+            >>> model2 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer2 = paddle.optimizer.Adam(parameters=model2.parameters())

-            models, optimizers = paddle.amp.amp_decorate(models=[model, model2], optimizers=[optimizer, optimizer2], level='O2')
+            >>> models, optimizers = paddle.amp.amp_decorate(models=[model, model2], optimizers=[optimizer, optimizer2], level='O2')

-            data = paddle.rand([10, 3, 32, 32])
+            >>> paddle.seed(2023)
+            >>> data = paddle.rand([10, 3, 32, 32])

-            with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-                output = models[0](data)
-                output2 = models[1](data)
-                print(output.dtype) # FP16
-                print(output2.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = models[0](data)
+            ...     output2 = models[1](data)
+            ...     assert output.dtype == paddle.float16
+            ...     assert output2.dtype == paddle.float16

-            # required: gpu
-            # Demo3: optimizers is None:
-            model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-            optimizer3 = paddle.optimizer.Adam(parameters=model2.parameters())
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> # Demo3: optimizers is None:
+            >>> model3 = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> optimizer3 = paddle.optimizer.Adam(parameters=model2.parameters())

-            model = paddle.amp.amp_decorate(models=model3, level='O2')
+            >>> model = paddle.amp.amp_decorate(models=model3, level='O2')

-            data = paddle.rand([10, 3, 32, 32])
+            >>> paddle.seed(2023)
+            >>> data = paddle.rand([10, 3, 32, 32])

-            with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
-                output = model(data)
-                print(output.dtype) # FP16
+            >>> with paddle.amp.amp_guard(enable=True, custom_white_list=None, custom_black_list=None, level='O2'):
+            ...     output = model(data)
+            ...     assert output.dtype == paddle.float16
"""
if not (level in ['O1', 'O2']):
raise ValueError(
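A detail the three demos rely on but do not show: with level='O2', amp_decorate casts the decorated model's own parameters to float16 (norm layers are typically kept in float32), which is why the decorated models produce float16 outputs. A minimal check along these lines, not part of this commit and assuming a CUDA device with default O2 behaviour:

    import paddle

    paddle.device.set_device('gpu')
    m = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
    print(m.weight.dtype)                               # paddle.float32 before decoration
    m = paddle.amp.amp_decorate(models=m, level='O2')
    print(m.weight.dtype)                               # paddle.float16 after O2 decoration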
@@ -717,34 +722,35 @@ def auto_cast(
     Examples:

      .. code-block:: python

-            import paddle
+            >>> import paddle

-            conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
-            data = paddle.rand([10, 3, 32, 32])
+            >>> conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
+            >>> paddle.seed(2023)
+            >>> data = paddle.rand([10, 3, 32, 32])

-            with paddle.amp.auto_cast():
-                conv = conv2d(data)
-                print(conv.dtype) # paddle.float16
+            >>> with paddle.amp.auto_cast():
+            ...     conv = conv2d(data)
+            ...     assert conv.dtype == paddle.float16

-            with paddle.amp.auto_cast(enable=False):
-                conv = conv2d(data)
-                print(conv.dtype) # paddle.float32
+            >>> with paddle.amp.auto_cast(enable=False):
+            ...     conv = conv2d(data)
+            ...     assert conv.dtype == paddle.float32

-            with paddle.amp.auto_cast(custom_black_list={'conv2d'}):
-                conv = conv2d(data)
-                print(conv.dtype) # paddle.float32
+            >>> with paddle.amp.auto_cast(custom_black_list={'conv2d'}):
+            ...     conv = conv2d(data)
+            ...     assert conv.dtype == paddle.float32

-            a = paddle.rand([2,3])
-            b = paddle.rand([2,3])
-            with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}):
-                c = a + b
-                print(c.dtype) # paddle.float16
+            >>> a = paddle.rand([2,3])
+            >>> b = paddle.rand([2,3])
+            >>> with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}):
+            ...     c = a + b
+            ...     assert c.dtype == paddle.float16

-            with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O2'):
-                d = a + b
-                print(d.dtype) # paddle.float16
+            >>> with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O2'):
+            ...     d = a + b
+            ...     assert d.dtype == paddle.float16
"""
return amp_guard(
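The auto_cast examples only check dtypes. In a real training step, auto_cast is usually paired with paddle.amp.GradScaler so the float16 loss can be scaled before backward; the sketch below follows Paddle's documented dygraph AMP pattern rather than anything in this diff, and assumes a CUDA device:

    import paddle

    model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])

    with paddle.amp.auto_cast():
        out = model(data)                  # conv2d runs in float16 under the O1 guard
        loss = paddle.mean(out)

    scaled = scaler.scale(loss)            # scale the loss to avoid float16 gradient underflow
    scaled.backward()                      # backward runs on the scaled loss
    scaler.minimize(optimizer, scaled)     # unscale gradients and apply the optimizer step
    optimizer.clear_grad()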
