[xdoctest] reformat example code with google style in No.21-30 #55849

Merged: 14 commits, Aug 2, 2023

Changes from all commits
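For readers unfamiliar with the target format: the Google/xdoctest style applied throughout this PR turns each docstring example into an interactive session, with `>>>` and `...` prompts on the statements and the expected output on unprefixed lines below, so the examples can be executed and checked automatically. A minimal before/after sketch of the pattern (the tensor values here are illustrative, not taken from this PR):

    # Before: output recorded only in comments
    import paddle
    x = paddle.to_tensor([1.0, 2.0])
    print(x.shape)  # [2]

    # After: prompts plus checkable expected output
    >>> import paddle
    >>> x = paddle.to_tensor([1.0, 2.0])
    >>> print(x.shape)
    [2]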
383 changes: 194 additions & 189 deletions python/paddle/autograd/py_layer.py

Large diffs are not rendered by default.

104 changes: 52 additions & 52 deletions python/paddle/autograd/saved_tensors_hooks.py
@@ -45,58 +45,58 @@ class saved_tensors_hooks:
Examples:
.. code-block:: python

# Example1
import paddle

def pack_hook(x):
    print("Packing", x)
    return x.numpy()

def unpack_hook(x):
    print("UnPacking", x)
    return paddle.to_tensor(x)

a = paddle.ones([3,3])
b = paddle.ones([3,3]) * 2
a.stop_gradient = False
b.stop_gradient = False
with paddle.autograd.saved_tensors_hooks(pack_hook, unpack_hook):
    y = paddle.multiply(a, b)
y.sum().backward()

# Example2
import paddle
from paddle.autograd import PyLayer

class cus_multiply(PyLayer):
    @staticmethod
    def forward(ctx, a, b):
        y = paddle.multiply(a, b)
        ctx.save_for_backward(a, b)
        return y

    @staticmethod
    def backward(ctx, dy):
        a,b = ctx.saved_tensor()
        grad_a = dy * a
        grad_b = dy * b
        return grad_a, grad_b

def pack_hook(x):
    print("Packing", x)
    return x.numpy()

def unpack_hook(x):
    print("UnPacking", x)
    return paddle.to_tensor(x)

a = paddle.ones([3,3])
b = paddle.ones([3,3]) * 2
a.stop_gradient = False
b.stop_gradient = False
with paddle.autograd.saved_tensors_hooks(pack_hook, unpack_hook):
    y = cus_multiply.apply(a, b)
y.sum().backward()
>>> # Example1
>>> import paddle

>>> def pack_hook(x):
...     print("Packing", x)
...     return x.numpy()

>>> def unpack_hook(x):
...     print("UnPacking", x)
...     return paddle.to_tensor(x)

>>> a = paddle.ones([3,3])
>>> b = paddle.ones([3,3]) * 2
>>> a.stop_gradient = False
>>> b.stop_gradient = False
>>> with paddle.autograd.saved_tensors_hooks(pack_hook, unpack_hook):
...     y = paddle.multiply(a, b)
>>> y.sum().backward()

>>> # Example2
>>> import paddle
>>> from paddle.autograd import PyLayer

>>> class cus_multiply(PyLayer):
...     @staticmethod
...     def forward(ctx, a, b):
...         y = paddle.multiply(a, b)
...         ctx.save_for_backward(a, b)
...         return y
...
...     @staticmethod
...     def backward(ctx, dy):
...         a,b = ctx.saved_tensor()
...         grad_a = dy * a
...         grad_b = dy * b
...         return grad_a, grad_b

>>> def pack_hook(x):
...     print("Packing", x)
...     return x.numpy()

>>> def unpack_hook(x):
...     print("UnPacking", x)
...     return paddle.to_tensor(x)

>>> a = paddle.ones([3,3])
>>> b = paddle.ones([3,3]) * 2
>>> a.stop_gradient = False
>>> b.stop_gradient = False
>>> with paddle.autograd.saved_tensors_hooks(pack_hook, unpack_hook):
...     y = cus_multiply.apply(a, b)
>>> y.sum().backward()
"""

def __init__(self, pack_hook, unpack_hook):
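Because the converted examples carry explicit prompts and expected output, they can be checked mechanically rather than read by eye. A hedged sketch of running one module's examples from Python, assuming the xdoctest package is installed (the module path is only an example):

    import xdoctest

    # Collect and execute every '>>>' example in the module's docstrings,
    # comparing what gets printed against the expected-output lines.
    xdoctest.doctest_module('paddle.autograd.saved_tensors_hooks', command='all')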
52 changes: 32 additions & 20 deletions python/paddle/framework/dtype.py
@@ -58,15 +58,19 @@ def iinfo(dtype):
Examples:
.. code-block:: python

import paddle

iinfo_uint8 = paddle.iinfo(paddle.uint8)
print(iinfo_uint8)
# paddle.iinfo(min=0, max=255, bits=8, dtype=uint8)
print(iinfo_uint8.min) # 0
print(iinfo_uint8.max) # 255
print(iinfo_uint8.bits) # 8
print(iinfo_uint8.dtype) # uint8
>>> import paddle

>>> iinfo_uint8 = paddle.iinfo(paddle.uint8)
>>> print(iinfo_uint8)
paddle.iinfo(min=0, max=255, bits=8, dtype=uint8)
>>> print(iinfo_uint8.min)
0
>>> print(iinfo_uint8.max)
255
>>> print(iinfo_uint8.bits)
8
>>> print(iinfo_uint8.dtype)
uint8

"""
return core_iinfo(dtype)
@@ -98,17 +102,25 @@ def finfo(dtype):
Examples:
.. code-block:: python

import paddle

finfo_float32 = paddle.finfo(paddle.float32)
print(finfo_float32.min) # -3.40282e+38
print(finfo_float32.max) # 3.40282e+38
print(finfo_float32.eps) # 1.19209e-07
print(finfo_float32.resolution) # 1e-06
print(finfo_float32.smallest_normal) # 1.17549e-38
print(finfo_float32.tiny) # 1.17549e-38
print(finfo_float32.bits) # 32
print(finfo_float32.dtype) # float32
>>> import paddle

>>> finfo_float32 = paddle.finfo(paddle.float32)
>>> print(finfo_float32.min)
-3.4028234663852886e+38
>>> print(finfo_float32.max)
3.4028234663852886e+38
>>> print(finfo_float32.eps)
1.1920928955078125e-07
>>> print(finfo_float32.resolution)
1e-06
>>> print(finfo_float32.smallest_normal)
1.1754943508222875e-38
>>> print(finfo_float32.tiny)
1.1754943508222875e-38
>>> print(finfo_float32.bits)
32
>>> print(finfo_float32.dtype)
float32

"""
return core_finfo(dtype)
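Note how the rounded values in the old comments (e.g. `# -3.40282e+38`) become full-precision expected outputs in the doctest form: the check compares the exact text Python prints, which is the full repr of the float. A quick illustration of where the longer figure comes from (uses NumPy only, not part of this diff):

    import numpy as np

    # float32 max widened to a Python float and printed with its full repr,
    # which is the text a doctest expected-output line has to match.
    print(float(np.finfo(np.float32).max))  # 3.4028234663852886e+38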
8 changes: 4 additions & 4 deletions python/paddle/framework/framework.py
@@ -35,8 +35,8 @@ def set_default_dtype(d):
Examples:
.. code-block:: python

import paddle
paddle.set_default_dtype("float32")
>>> import paddle
>>> paddle.set_default_dtype("float32")

"""
if isinstance(d, type):
@@ -76,7 +76,7 @@ def get_default_dtype():
Examples:
.. code-block:: python

import paddle
paddle.get_default_dtype()
>>> import paddle
>>> paddle.get_default_dtype()
"""
return LayerHelperBase.get_default_dtype()
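The converted get_default_dtype example shows no expected output; for context, a short usage sketch of the two calls together (assuming the return value is a plain string, with a final call that simply restores the usual default; this sketch is an illustration, not part of the diff):

    >>> import paddle
    >>> paddle.set_default_dtype("float64")
    >>> paddle.get_default_dtype()
    'float64'
    >>> paddle.set_default_dtype("float32")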