Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[xdoctest] reformat example code with google style in No.297、298、302 #56861

Merged
merged 15 commits into from
Sep 6, 2023
Merged
53 changes: 26 additions & 27 deletions python/paddle/incubate/operators/unzip.py
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这个文件 #56826 已经改过了,需要恢复

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

是我看错了么,这个文件没恢复啊

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

恢复了,因为不需要改

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这个文件仍然有 diff,需要 merge develop 确保没有 diff

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sure?我是直接用的paddle官方的py文件替换的

Original file line number Diff line number Diff line change
Expand Up @@ -33,34 +33,33 @@ def unzip(input, lod):
Examples:

.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
paddle.enable_static()
input_np = np.array([
[1.0, 2.0, 3.0, 4.0],
[10.0, 20.0, 30.0, 40.0],
[100.0, 200.0, 300.0, 400.0]
])
lod_np = np.array([0, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12])
input = paddle.to_tensor(input_np, "int64")
lod = paddle.to_tensor(lod_np, "int64")

unzipped_input = paddle.incubate.unzip(input, lod)
'''
unzipped_input is [
[1.0, 2.0, 3.0, 4.0],
[0.0, 0.0, 0.0, 0.0],
[10.0, 20.0, 30.0, 40.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[100.0, 200.0, 300.0, 400.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]
]
'''
>>> # doctest: +REQUIRES(env:GPU)
>>> import numpy as np
>>> import paddle
>>> paddle.set_device('gpu')
>>> input_np = np.array([
... [1.0, 2.0, 3.0, 4.0],
... [10.0, 20.0, 30.0, 40.0],
... [100.0, 200.0, 300.0, 400.0]
... ])
>>> lod_np = np.array([0, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12])
>>> input = paddle.to_tensor(input_np, "int64")
>>> lod = paddle.to_tensor(lod_np, "int64")
>>> unzipped_input = paddle.incubate.operators.unzip(input, lod)
>>> print(unzipped_input)
Tensor(shape=[10, 4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
[[1 , 2 , 3 , 4 ],
[0 , 0 , 0 , 0 ],
[10, 20, 30, 40],
[0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 ],
[100, 200, 300, 400],
[0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 ]])

"""
helper = LayerHelper('unzip', **locals())
out = helper.create_variable(dtype=input.dtype)
Expand Down
72 changes: 36 additions & 36 deletions python/paddle/incubate/optimizer/functional/bfgs.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,46 +81,46 @@ def minimize_bfgs(
.. code-block:: python
:name: code-example1

# Example1: 1D Grid Parameters
import paddle
# Randomly simulate a batch of input data
inputs = paddle. normal(shape=(100, 1))
labels = inputs * 2.0
# define the loss function
def loss(w):
y = w * inputs
return paddle.nn.functional.square_error_cost(y, labels).mean()
# Initialize weight parameters
w = paddle.normal(shape=(1,))
# Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
for epoch in range(0, 10):
# Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
# Use paddle.assign to update parameters in place
paddle. assign(w_update, w)
>>> # Example1: 1D Grid Parameters
>>> import paddle
>>> # Randomly simulate a batch of input data
>>> inputs = paddle.normal(shape=(100, 1))
>>> labels = inputs * 2.0
>>> # define the loss function
>>> def loss(w):
... y = w * inputs
... return paddle.nn.functional.square_error_cost(y, labels).mean()
>>> # Initialize weight parameters
>>> w = paddle.normal(shape=(1,))
>>> # Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
>>> for epoch in range(0, 10):
... # Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
... w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
... # Use paddle.assign to update parameters in place
...     paddle.assign(w_update, w)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
... paddle. assign(w_update, w)
... paddle.assign(w_update, w)


.. code-block:: python
:name: code-example2

# Example2: Multidimensional Grid Parameters
import paddle
def flatten(x):
return x. flatten()
def unflatten(x):
return x.reshape((2,2))
# Assume the network parameters are more than one dimension
def net(x):
assert len(x.shape) > 1
return x.square().mean()
# function to be optimized
def bfgs_f(flatten_x):
return net(unflatten(flatten_x))
x = paddle.rand([2,2])
for i in range(0, 10):
# Flatten x before using minimize_bfgs
x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
# unflatten x_update, then update parameters
paddle. assign(unflatten(x_update), x)
>>> # Example2: Multidimensional Grid Parameters
>>> import paddle
>>> def flatten(x):
...     return x.flatten()
>>> def unflatten(x):
... return x.reshape((2,2))
>>> # Assume the network parameters are more than one dimension
>>> def net(x):
... assert len(x.shape) > 1
... return x.square().mean()
>>> # function to be optimized
>>> def bfgs_f(flatten_x):
... return net(unflatten(flatten_x))
>>> x = paddle.rand([2,2])
>>> for i in range(0, 10):
... # Flatten x before using minimize_bfgs
... x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
... # unflatten x_update, then update parameters
...     paddle.assign(unflatten(x_update), x)
SigureMo marked this conversation as resolved.
Show resolved Hide resolved
"""

if dtype not in ['float32', 'float64']:
Expand Down
72 changes: 36 additions & 36 deletions python/paddle/incubate/optimizer/functional/lbfgs.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,46 +82,46 @@ def minimize_lbfgs(
.. code-block:: python
:name: code-example1

# Example1: 1D Grid Parameters
import paddle
# Randomly simulate a batch of input data
inputs = paddle. normal(shape=(100, 1))
labels = inputs * 2.0
# define the loss function
def loss(w):
y = w * inputs
return paddle.nn.functional.square_error_cost(y, labels).mean()
# Initialize weight parameters
w = paddle.normal(shape=(1,))
# Call the bfgs method to solve the weight that makes the loss the smallest, and update the parameters
for epoch in range(0, 10):
# Call the bfgs method to optimize the loss, note that the third parameter returned represents the weight
w_update = paddle.incubate.optimizer.functional.minimize_bfgs(loss, w)[2]
# Use paddle.assign to update parameters in place
paddle. assign(w_update, w)
>>> # Example1: 1D Grid Parameters
>>> import paddle
>>> # Randomly simulate a batch of input data
>>> inputs = paddle.normal(shape=(100, 1))
>>> labels = inputs * 2.0
>>> # define the loss function
>>> def loss(w):
... y = w * inputs
... return paddle.nn.functional.square_error_cost(y, labels).mean()
>>> # Initialize weight parameters
>>> w = paddle.normal(shape=(1,))
>>> # Call the lbfgs method to solve the weight that makes the loss the smallest, and update the parameters
>>> for epoch in range(0, 10):
...     # Call the lbfgs method to optimize the loss, note that the third parameter returned represents the weight
...     w_update = paddle.incubate.optimizer.functional.minimize_lbfgs(loss, w)[2]
...     # Use paddle.assign to update parameters in place
...     paddle.assign(w_update, w)
SigureMo marked this conversation as resolved.
Show resolved Hide resolved

.. code-block:: python
:name: code-example2

# Example2: Multidimensional Grid Parameters
import paddle
def flatten(x):
return x. flatten()
def unflatten(x):
return x.reshape((2,2))
# Assume the network parameters are more than one dimension
def net(x):
assert len(x.shape) > 1
return x.square().mean()
# function to be optimized
def bfgs_f(flatten_x):
return net(unflatten(flatten_x))
x = paddle.rand([2,2])
for i in range(0, 10):
# Flatten x before using minimize_bfgs
x_update = paddle.incubate.optimizer.functional.minimize_bfgs(bfgs_f, flatten(x))[2]
# unflatten x_update, then update parameters
paddle. assign(unflatten(x_update), x)
>>> # Example2: Multidimensional Grid Parameters
>>> import paddle
>>> def flatten(x):
...     return x.flatten()
>>> def unflatten(x):
... return x.reshape((2,2))
>>> # Assume the network parameters are more than one dimension
>>> def net(x):
... assert len(x.shape) > 1
... return x.square().mean()
>>> # function to be optimized
>>> def bfgs_f(flatten_x):
... return net(unflatten(flatten_x))
>>> x = paddle.rand([2,2])
>>> for i in range(0, 10):
...     # Flatten x before using minimize_lbfgs
...     x_update = paddle.incubate.optimizer.functional.minimize_lbfgs(bfgs_f, flatten(x))[2]
...     # unflatten x_update, then update parameters
...     paddle.assign(unflatten(x_update), x)
SigureMo marked this conversation as resolved.
Show resolved Hide resolved

"""
if dtype not in ['float32', 'float64']:
Expand Down
Loading