Skip to content

Commit 33ba281

Browse files
authored
[0-size Tensor No.165, 218] Add 0-size Tensor support for paddle.nn.functional.cosine_similarity [fluid_ops]
1 parent b459dbd commit 33ba281

File tree

3 files changed

+107
-0
lines changed

3 files changed

+107
-0
lines changed

python/paddle/nn/functional/common.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2229,6 +2229,8 @@ def cosine_similarity(
22292229
[ 0.97689527, 0.99996042, -0.55138415])
22302230
22312231
"""
2232+
if x1.shape[axis] == 0 or x2.shape[axis] == 0:
2233+
return sum(paddle.multiply(x1, x2), axis=axis)
22322234
bs = paddle.broadcast_shape([x1.shape[axis]], [x2.shape[axis]])
22332235
w12 = sum(paddle.multiply(x1, x2), axis=axis)
22342236
w1 = sum(paddle.multiply(x1, x1), axis=axis)

test/legacy_test/test_cosine_similarity_api.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,5 +163,44 @@ def test_dygraph_5(self):
163163
np.testing.assert_allclose(y.numpy(), np_out, rtol=1e-05)
164164

165165

166+
class TestCosineSimilarityAPI_ZeroSize(unittest.TestCase):
167+
def setUp(self):
168+
self.places = get_places()
169+
170+
def _get_numpy_out(self, x1, x2, axis=1, eps=1e-8):
171+
bs = np.broadcast_shapes([x1.shape[axis]], [x2.shape[axis]])
172+
w12 = np.sum(x1 * x2, axis=axis)
173+
w1 = np.sum(x1 * x1, axis=axis)
174+
w2 = np.sum(x2 * x2, axis=axis)
175+
m1, m2 = bs[0] / x1.shape[axis], bs[0] / x2.shape[axis]
176+
if m1 != 1:
177+
w1 = w1 * m1
178+
if m2 != 1:
179+
w2 = w2 * m2
180+
n12 = np.sqrt(np.clip(w1 * w2, eps * eps, None))
181+
cos_sim = w12 / n12
182+
return cos_sim
183+
184+
def test_dygraph_1(self):
185+
paddle.disable_static()
186+
187+
shape = [0, 15]
188+
axis = 1
189+
eps = 1e-8
190+
np.random.seed(1)
191+
np_x1 = np.random.rand(*shape).astype(np.float32)
192+
np_x2 = np.random.rand(*shape).astype(np.float32)
193+
np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)
194+
195+
tensor_x1 = paddle.to_tensor(np_x1)
196+
tensor_x1.stop_gradient = False
197+
tensor_x2 = paddle.to_tensor(np_x2)
198+
y = F.cosine_similarity(tensor_x1, tensor_x2, axis=axis, eps=eps)
199+
200+
np.testing.assert_allclose(y.numpy(), np_out, rtol=1e-05)
201+
y.sum().backward()
202+
np.testing.assert_allclose(tensor_x1.grad.shape, tensor_x1.shape)
203+
204+
166205
if __name__ == '__main__':
167206
unittest.main()

test/legacy_test/test_sigmoid_focal_loss.py

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -191,5 +191,71 @@ def test_SigmoidFocalLoss_error(self):
191191
paddle.enable_static()
192192

193193

194+
class TestSigmoidFocalLoss_ZeroSize(unittest.TestCase):
    """Check sigmoid_focal_loss on 0-size inputs (empty batch)."""

    def _test_dygraph(
        self,
        place,
        logit_np,
        label_np,
        normalizer_np,
        alpha=0.25,
        gamma=2.0,
        reduction='sum',
    ):
        """Run the functional API in dygraph mode and return its numpy result.

        Also runs backward() and checks the gradient shape matches the
        (0-size) logit shape.
        """
        paddle.disable_static()
        logit = paddle.to_tensor(logit_np)
        logit.stop_gradient = False
        label = paddle.to_tensor(label_np)
        normalizer = None
        if normalizer_np is not None:
            normalizer = paddle.to_tensor(normalizer_np)
        dy_res = call_sfl_functional(
            logit, label, normalizer, alpha, gamma, reduction
        )
        dy_res.sum().backward()
        # Shapes are integer sequences: compare them exactly rather than
        # with a floating-point closeness assertion.
        np.testing.assert_array_equal(logit.grad.shape, logit.shape)
        dy_result = dy_res.numpy()
        paddle.enable_static()
        return dy_result

    def test_SigmoidFocalLoss(self):
        # 0-size leading dimension exercises the empty-batch path.
        logit_np = np.random.uniform(0.1, 0.8, size=(0, 3, 4, 10)).astype(
            np.float64
        )
        label_np = np.random.randint(0, 2, size=(0, 3, 4, 10)).astype(
            np.float64
        )
        normalizer_nps = [
            np.asarray([np.sum(label_np > 0)], dtype=label_np.dtype),
            None,
        ]
        places = get_places()
        reductions = ['sum']
        alpha = 0.25
        gamma = 3
        for place in places:
            for reduction in reductions:
                for normalizer_np in normalizer_nps:
                    dy_result = self._test_dygraph(
                        place,
                        logit_np,
                        label_np,
                        normalizer_np,
                        alpha,
                        gamma,
                        reduction,
                    )
                    expected = calc_sigmoid_focal_loss(
                        logit_np,
                        label_np,
                        normalizer_np,
                        alpha,
                        gamma,
                        reduction,
                    )
                    np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
194260
if __name__ == "__main__":
195261
unittest.main()

0 commit comments

Comments
 (0)