Remove redundant numpy output in Example code (1/3), test=document_fix (
kevinng77 authored Dec 7, 2022
1 parent a9f17b9 commit c9c1685
Showing 20 changed files with 126 additions and 110 deletions.
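The pattern is the same in every file below: docstring examples that round-tripped a Tensor through NumPy purely for display now print the Tensor directly, and the expected output in the comments becomes the dygraph Tensor repr. A minimal sketch of the before/after (assuming an installed paddle; the place shown in the repr depends on the device, gpu:0 in the diffs below, cpu here):

    import paddle

    x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])

    # Before: a redundant conversion to NumPy just to show the values.
    print(x.numpy())
    # [[1 2 3]
    #  [1 2 3]]

    # After: printing the Tensor directly also documents shape, dtype, and place.
    print(x)
    # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
    #        [[1, 2, 3],
    #         [1, 2, 3]])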
@@ -106,7 +106,7 @@ def all_reduce(
             data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
             task = dist.stream.all_reduce(data, sync_op=False)
             task.wait()
-            out = data.numpy()
+            out = data
             # [[5, 7, 9], [5, 7, 9]]
     """
     if _warn_cur_rank_not_in_group(group):
41 changes: 21 additions & 20 deletions python/paddle/fft.py
@@ -530,26 +530,27 @@ def fftn(x, s=None, axes=None, norm="backward", name=None):
             x = paddle.meshgrid(arr, arr, arr)[1]
             fftn_xp = paddle.fft.fftn(x, axes=(1, 2))
-            print(fftn_xp.numpy())
-            # [[[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]]
+            print(fftn_xp)
+            # Tensor(shape=[4, 4, 4], dtype=complex128, place=Place(gpu:0), stop_gradient=True,
+            #        [[[(24+0j), 0j     , 0j     , -0j    ],
+            #          [(-8+8j), 0j     , 0j     , -0j    ],
+            #          [(-8+0j), 0j     , 0j     , -0j    ],
+            #          [(-8-8j), 0j     , 0j     , -0j    ]],
+            #         [[(24+0j), 0j     , 0j     , -0j    ],
+            #          [(-8+8j), 0j     , 0j     , -0j    ],
+            #          [(-8+0j), 0j     , 0j     , -0j    ],
+            #          [(-8-8j), 0j     , 0j     , -0j    ]],
+            #         [[(24+0j), 0j     , 0j     , -0j    ],
+            #          [(-8+8j), 0j     , 0j     , -0j    ],
+            #          [(-8+0j), 0j     , 0j     , -0j    ],
+            #          [(-8-8j), 0j     , 0j     , -0j    ]],
+            #         [[(24+0j), 0j     , 0j     , -0j    ],
+            #          [(-8+8j), 0j     , 0j     , -0j    ],
+            #          [(-8+0j), 0j     , 0j     , -0j    ],
+            #          [(-8-8j), 0j     , 0j     , -0j    ]]])
     """
     if is_integer(x) or is_floating_point(x):
         return fftn_r2c(
4 changes: 2 additions & 2 deletions python/paddle/incubate/nn/layer/fused_transformer.py
@@ -533,8 +533,8 @@ class FusedFeedForward(Layer):
             fused_feedforward_layer = FusedFeedForward(8, 8)
             x = paddle.rand((1, 8, 8))
             out = fused_feedforward_layer(x)
-            print(out.numpy().shape)
-            # (1, 8, 8)
+            print(out.shape)
+            # [1, 8, 8]
     """

     def __init__(
9 changes: 5 additions & 4 deletions python/paddle/nn/functional/activation.py
@@ -1677,11 +1677,12 @@ def glu(x, axis=-1, name=None):
             x = paddle.to_tensor(
                 [[-0.22014759, -1.76358426, 0.80566144, 0.04241343],
-                    [-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
+                 [-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
             )
-            print(F.glu(x).numpy())
-            # array([[-0.15216254, -0.9004892 ],
-            #        [-1.0577879 , -0.46985325]], dtype=float32)
+            print(F.glu(x))
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[-0.15216254, -0.90048921],
+            #         [-1.05778778, -0.46985325]])
     """
     check_variable_and_dtype(
20 changes: 8 additions & 12 deletions python/paddle/nn/functional/conv.py
@@ -657,10 +657,9 @@ def conv2d(
             w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
             y_var = F.conv2d(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6]
     """
     # entry checks
     if data_format not in ["NCHW", "NHWC"]:
@@ -1234,10 +1233,9 @@ def conv2d_transpose(
             w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
             y_var = F.conv2d_transpose(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10]
     """

     if data_format not in ['NCHW', 'NHWC']:
@@ -1523,10 +1521,9 @@ def conv3d(
             w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
             y_var = F.conv3d(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6, 6]
     """
     # entry check
     if data_format not in ["NCDHW", "NDHWC"]:
@@ -1738,10 +1735,9 @@ def conv3d_transpose(
             w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
             y_var = F.conv3d_transpose(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10, 10]
     """
     # entry checks
     if data_format not in ["NCDHW", "NDHWC"]:
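The conv hunks above all use the second variant of the cleanup: a dygraph Tensor's .shape attribute is already a plain Python list, so y_var.numpy().shape (a NumPy tuple such as (2, 6, 6, 6)) can be replaced by y_var.shape (a list such as [2, 6, 6, 6]). A short, self-contained sketch (assuming an installed paddle; the weights are random, only the output shape is deterministic):

    import paddle
    import paddle.nn.functional as F

    x_var = paddle.randn((2, 3, 8, 8), dtype='float32')  # NCHW input
    w_var = paddle.randn((6, 3, 3, 3), dtype='float32')  # 6 filters, 3x3 kernel
    y_var = F.conv2d(x_var, w_var)

    # No .numpy() needed: Tensor.shape is a Python list.
    print(y_var.shape)
    # [2, 6, 6, 6]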
4 changes: 3 additions & 1 deletion python/paddle/nn/functional/distance.py
@@ -63,7 +63,9 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
             x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
             y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
             distance = paddle.nn.functional.pairwise_distance(x, y)
-            print(distance.numpy()) # [5. 5.]
+            print(distance)
+            # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [4.99999860, 4.99999860])
     """
     check_type(p, 'porder', (float, int), 'PairwiseDistance')
9 changes: 5 additions & 4 deletions python/paddle/nn/functional/extension.py
@@ -215,10 +215,11 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
             lengths = paddle.to_tensor([10, 9, 8])
             mask = paddle.nn.functional.sequence_mask(lengths)
-            print(mask.numpy())
-            # [[1 1 1 1 1 1 1 1 1 1]
-            #  [1 1 1 1 1 1 1 1 1 0]
-            #  [1 1 1 1 1 1 1 1 0 0]]
+            print(mask)
+            # Tensor(shape=[3, 10], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            #        [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+            #         [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
+            #         [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
     """

41 changes: 24 additions & 17 deletions python/paddle/nn/functional/loss.py
@@ -1353,17 +1353,20 @@ def l1_loss(input, label, reduction='mean', name=None):
             label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
             l1_loss = paddle.nn.functional.l1_loss(input, label)
-            print(l1_loss.numpy())
-            # [0.35]
+            print(l1_loss)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [0.34999999])
             l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='none')
-            print(l1_loss.numpy())
-            # [[0.20000005 0.19999999]
-            #  [0.2        0.79999995]]
+            print(l1_loss)
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[0.20000005, 0.19999999],
+            #         [0.20000000, 0.79999995]])
             l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='sum')
-            print(l1_loss.numpy())
-            # [1.4]
+            print(l1_loss)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [1.39999998])
     """
     if reduction not in ['sum', 'mean', 'none']:
@@ -2530,9 +2533,11 @@ def cross_entropy(
             cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                 weight=weight, reduction=reduction)
             dy_ret = cross_entropy_loss(
-                                        input,
-                                        label)
-            print(dy_ret.numpy()) #[5.41993642]
+                input,
+                label)
+            print(dy_ret)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [5.34043430])

         .. code-block:: python
@@ -2550,13 +2555,15 @@ def cross_entropy(
             labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
             labels /= paddle.sum(labels, axis=axis, keepdim=True)
             paddle_loss_mean = paddle.nn.functional.cross_entropy(
-                                                                  logits,
-                                                                  labels,
-                                                                  soft_label=True,
-                                                                  axis=axis,
-                                                                  weight=weight,
-                                                                  reduction=reduction)
-            print(paddle_loss_mean.numpy()) #[1.12908343]
+                logits,
+                labels,
+                soft_label=True,
+                axis=axis,
+                weight=weight,
+                reduction=reduction)
+            print(paddle_loss_mean)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [1.11043464])
     """

5 changes: 2 additions & 3 deletions python/paddle/nn/functional/vision.py
@@ -368,9 +368,8 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
             x = paddle.randn(shape=[2,9,4,4])
             out_var = F.pixel_shuffle(x, 3)
-            out = out_var.numpy()
-            print(out.shape)
-            # (2, 1, 12, 12)
+            print(out_var.shape)
+            # [2, 1, 12, 12]
     """
     if not isinstance(upscale_factor, int):
         raise TypeError("upscale factor must be int type")
10 changes: 6 additions & 4 deletions python/paddle/nn/initializer/constant.py
@@ -32,11 +32,13 @@ class Constant(ConstantInitializer):
             data = paddle.rand([30, 10, 2], dtype='float32')
             linear = nn.Linear(2,
-                                4,
-                                weight_attr=nn.initializer.Constant(value=2.0))
+                               4,
+                               weight_attr=nn.initializer.Constant(value=2.0))
             res = linear(data)
-            print(linear.weight.numpy())
-            #result is [[2. 2. 2. 2.],[2. 2. 2. 2.]]
+            print(linear.weight)
+            # Tensor(shape=[2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+            #        [[2., 2., 2., 2.],
+            #         [2., 2., 2., 2.]])
     """

20 changes: 8 additions & 12 deletions python/paddle/nn/layer/conv.py
@@ -668,9 +668,8 @@ class Conv2D(_ConvNd):
             conv = nn.Conv2D(4, 6, (3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6]
     """

     def __init__(
@@ -841,9 +840,8 @@ class Conv2DTranspose(_ConvNd):
             conv = nn.Conv2DTranspose(4, 6, (3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10]
     """

     def __init__(
@@ -999,9 +997,8 @@ class Conv3D(_ConvNd):
             conv = nn.Conv3D(4, 6, (3, 3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6, 6]
     """

     def __init__(
@@ -1181,9 +1178,8 @@ class Conv3DTranspose(_ConvNd):
             conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10, 10]
     """

     def __init__(
4 changes: 3 additions & 1 deletion python/paddle/nn/layer/distance.py
@@ -56,7 +56,9 @@ class PairwiseDistance(Layer):
             y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
             dist = paddle.nn.PairwiseDistance()
             distance = dist(x, y)
-            print(distance.numpy()) # [5. 5.]
+            print(distance)
+            # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [4.99999860, 4.99999860])
     """

43 changes: 26 additions & 17 deletions python/paddle/nn/layer/loss.py
@@ -102,7 +102,9 @@ class BCEWithLogitsLoss(Layer):
             label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
             bce_logit_loss = paddle.nn.BCEWithLogitsLoss()
             output = bce_logit_loss(logit, label)
-            print(output.numpy()) # [0.45618808]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [0.45618814])
     """

@@ -319,9 +321,11 @@ class CrossEntropyLoss(Layer):
             cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                 weight=weight, reduction=reduction)
             dy_ret = cross_entropy_loss(
-                                        input,
-                                        label)
-            print(dy_ret.numpy()) #[5.41993642]
+                input,
+                label)
+            print(dy_ret)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [5.34043430])

         .. code-block:: python
@@ -339,13 +343,15 @@ class CrossEntropyLoss(Layer):
             labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
             labels /= paddle.sum(labels, axis=axis, keepdim=True)
             paddle_loss_mean = paddle.nn.functional.cross_entropy(
-                                                                  logits,
-                                                                  labels,
-                                                                  soft_label=True,
-                                                                  axis=axis,
-                                                                  weight=weight,
-                                                                  reduction=reduction)
-            print(paddle_loss_mean.numpy()) #[1.12908343]
+                logits,
+                labels,
+                soft_label=True,
+                axis=axis,
+                weight=weight,
+                reduction=reduction)
+            print(paddle_loss_mean)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            #        [1.11043464])
     """

@@ -635,19 +641,22 @@ class L1Loss(Layer):
             l1_loss = paddle.nn.L1Loss()
             output = l1_loss(input, label)
-            print(output.numpy())
-            # [0.35]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [0.34999999])
             l1_loss = paddle.nn.L1Loss(reduction='sum')
             output = l1_loss(input, label)
-            print(output.numpy())
-            # [1.4]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [1.39999998])
             l1_loss = paddle.nn.L1Loss(reduction='none')
             output = l1_loss(input, label)
             print(output)
-            # [[0.20000005 0.19999999]
-            #  [0.2        0.79999995]]
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[0.20000005, 0.19999999],
+            #         [0.20000000, 0.79999995]])
     """

[Diffs for the remaining 7 changed files were not loaded in this view.]