【Hackathon No.30】 #40545

Merged
Changes from 28 commits (41 commits in total)

Commits
e5ddccf
'TripletMarginDistanceLoss'
yangguohao Mar 14, 2022
97b703f
'test_file'
yangguohao Mar 15, 2022
ff3e761
'2022_03_27'
yangguohao Mar 26, 2022
a0919de
2022-03-31
yangguohao Mar 30, 2022
a15eae3
2022-04-05
yangguohao Apr 5, 2022
7f069e7
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao Apr 5, 2022
a8fb2f9
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao Apr 8, 2022
5ad881a
2
yangguohao Apr 5, 2022
8b37721
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao Apr 17, 2022
282d91e
2022-04-17
yangguohao Apr 17, 2022
6dfe4fc
2022-04-17_2
yangguohao Apr 17, 2022
bf098a8
2022-04-17_3
yangguohao Apr 17, 2022
9afaf1c
2022-04-17_4
yangguohao Apr 17, 2022
ef649a1
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao Apr 17, 2022
1f12935
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao Apr 17, 2022
82671c8
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao Apr 18, 2022
f179733
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao Apr 25, 2022
0a9d495
2022-04-25
yangguohao Apr 25, 2022
a474af2
2022-05-02_V1
yangguohao May 2, 2022
5b48f96
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao May 2, 2022
8e8175d
2022-05-06_V1
yangguohao May 6, 2022
8c9e9d2
2022-05-07_V1
yangguohao May 7, 2022
eb0ea15
Merge branch 'PaddlePaddle:develop' into triplet_margin_distance_loss
yangguohao May 7, 2022
4b31fd1
Update loss.py
yangguohao May 9, 2022
6b9aec6
Update loss.py
yangguohao May 9, 2022
b48c998
Update loss.py
yangguohao May 9, 2022
a9959a6
Update loss.py
yangguohao May 9, 2022
a48404e
Update loss.py
yangguohao May 9, 2022
14adb6f
Update loss.py
yangguohao May 17, 2022
9e8f696
Update loss.py
yangguohao May 17, 2022
f617d14
Update loss.py
yangguohao May 17, 2022
6e58343
Update loss.py
yangguohao May 17, 2022
b0ae52a
Update loss.py
yangguohao May 20, 2022
2a4ed81
Merge branch 'develop' into triplet_margin_distance_loss
yangguohao Jun 1, 2022
a6c17b0
2022-06-01_pre-commit
yangguohao Jun 1, 2022
8b00df1
2022-06-05
yangguohao Jun 5, 2022
b3e8aea
Merge branch 'develop' into triplet_margin_distance_loss
yangguohao Jun 5, 2022
5dce717
2022-06-06
yangguohao Jun 6, 2022
8f9d5be
2022-06-07
yangguohao Jun 7, 2022
f468086
2022-06-07_V2
yangguohao Jun 7, 2022
bc5106d
Merge branch 'develop' into triplet_margin_distance_loss
yangguohao Jun 8, 2022
@@ -0,0 +1,361 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import numpy as np
import unittest


def call_TripletMarginDistanceLoss_layer(input,
positive,
negative,
distance_function=None,
margin=0.3,
swap=False,
reduction='mean',):
triplet_margin_with_distance_loss = paddle.nn.TripletMarginWithDistanceLoss(distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction)
res = triplet_margin_with_distance_loss(input=input,
positive=positive,
negative=negative,)
return res


def call_TripletMaginDistanceLoss_functional(input,
positive,
negative,
distance_function = None,
margin=0.3,
swap=False,
reduction='mean',):
res = paddle.nn.functional.triplet_margin_with_distance_loss(
input=input,
positive=positive,
negative=negative,
distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction)
return res


def test_static(place,
input_np,
positive_np,
negative_np,
distance_function=None,
margin=0.3,
swap=False,
reduction='mean',
functional=False):
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float64')
positive = paddle.static.data(
name='positive', shape=positive_np.shape, dtype='float64')
negative = paddle.static.data(
name='negative', shape=negative_np.shape, dtype='float64')
feed_dict = {"input": input_np, "positive": positive_np, "negative": negative_np}

if functional:
res = call_TripletMaginDistanceLoss_functional(input=input,
positive=positive,
negative=negative,
distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction)
else:
res = call_TripletMarginDistanceLoss_layer(input=input,
positive=positive,
negative=negative,
distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction)

exe = paddle.static.Executor(place)
static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])

return static_result

def test_dygraph(place,
input,
positive,
negative,
distance_function=None,
margin=0.3,
swap=False,
reduction='mean',
functional=False):
paddle.disable_static()
input = paddle.to_tensor(input)
positive = paddle.to_tensor(positive)
negative = paddle.to_tensor(negative)

if functional:
dy_res = call_TripletMaginDistanceLoss_functional(input=input,
positive=positive,
negative=negative,
distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction)
else:
dy_res = call_TripletMarginDistanceLoss_layer(input=input,
positive=positive,
negative=negative,
distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction)
dy_result = dy_res.numpy()
paddle.enable_static()
return dy_result


def calc_triplet_margin_distance_loss(input,
positive,
negative,
distance_function=None,
margin=0.3,
swap=False,
reduction='mean',):
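    # NumPy reference: the distance_function argument is ignored here and the
    # Euclidean (L2) norm is used, so the expected per-sample loss is
    # max(d(input, positive) - d(input, negative) + margin, 0).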
distance_function = np.linalg.norm
positive_dist = distance_function((input - positive), 2, axis=1)
negative_dist = distance_function((input - negative), 2, axis=1)

if swap:
swap_dist = np.linalg.norm((positive - negative), 2, axis=1)
negative_dist = np.minimum(negative_dist, swap_dist)
expected = np.maximum(positive_dist - negative_dist + margin, 0)

if reduction == 'mean':
expected = np.mean(expected)
elif reduction == 'sum':
expected = np.sum(expected)
else:
expected = expected

return expected


class TestTripletMarginWithDistanceLoss(unittest.TestCase):
def test_TripletMarginDistanceLoss(self):
input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
positive = np.random.uniform(0, 2, size=(20, 30)).astype(np.float64)
negative = np.random.uniform(0, 2, size=(20, 30)).astype(np.float64)

places = [paddle.CPUPlace()]
if paddle.device.is_compiled_with_cuda():
places.append(paddle.CUDAPlace(0))
reductions = ['sum', 'mean', 'none']
for place in places:
for reduction in reductions:
expected = calc_triplet_margin_distance_loss(input=input,
positive=positive,
negative=negative,
reduction=reduction)

dy_result = test_dygraph(place=place,
input=input,
positive=positive,
negative=negative,
reduction=reduction,)

static_result = test_static(place=place,
input_np=input,
positive_np=positive,
negative_np=negative,
reduction=reduction,)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
static_functional = test_static(place=place,
input_np=input,
positive_np=positive,
negative_np=negative,
reduction=reduction,
functional=True)
dy_functional = test_dygraph(
place=place,
input=input,
positive=positive,
negative=negative,
reduction=reduction,
functional=True)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))

Contributor:

Please add tests for margin < 0 and for swap=True.
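
A rough sketch of the negative-margin case being requested (hypothetical method name; the test_TripletMarginWithDistanceLoss_swap and test_TripletMarginWithDistanceLoss_margin methods later in this file cover both requests):

def test_TripletMarginDistanceLoss_negative_margin(self):
    paddle.disable_static()
    input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
    positive = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
    negative = paddle.to_tensor([[0.2, 0.1]], dtype='float32')
    # A negative margin is expected to be rejected with a ValueError.
    self.assertRaises(
        ValueError,
        paddle.nn.functional.triplet_margin_with_distance_loss,
        input=input,
        positive=positive,
        negative=negative,
        margin=-0.5)
    paddle.enable_static()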

def test_TripletMarginDistanceLoss_error(self):
paddle.disable_static()
self.assertRaises(
ValueError,
paddle.nn.TripletMarginWithDistanceLoss,
reduction="unsupport reduction")
input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
positive = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
negative = paddle.to_tensor([[0.2, 0.1]], dtype='float32')
self.assertRaises(
ValueError,
paddle.nn.functional.triplet_margin_with_distance_loss,
input=input,
positive=positive,
negative=negative,
reduction="unsupport reduction")
paddle.enable_static()

def test_TripletMarginDistanceLoss_distance_function(self):

def distance_function_1(x1, x2):
return 1.0 - paddle.nn.functional.cosine_similarity(x1, x2)

def distance_function_2(x1, x2):
return paddle.max(paddle.abs(x1-x2), axis=1)
Contributor:

Could you test whether an error is raised when the distance function does not satisfy non-negativity?

Contributor (Author):

Added a check that the computed distance cannot be less than 0.
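
The check itself lives in the loss implementation rather than in this test file; conceptually it is something like the following sketch (an assumption about the shape of the check, not the actual loss.py code):

# Inside triplet_margin_with_distance_loss, after computing the distances:
if paddle.any(positive_dist < 0) or paddle.any(negative_dist < 0):
    raise ValueError(
        "The distance function should return non-negative values, "
        "but a negative distance was produced.")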


distance_function_list = [distance_function_1,distance_function_2]
input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
positive = np.random.uniform(0, 2, size=(20, 30)).astype(np.float64)
negative = np.random.uniform(0, 2, size=(20, 30)).astype(np.float64)

place = paddle.CPUPlace()
reduction = 'mean'
for distance_function in distance_function_list:
dy_result = test_dygraph(place=place,
input=input,
positive=positive,
negative=negative,
distance_function=distance_function,
reduction=reduction,)

static_result = test_static(place=place,
input_np=input,
positive_np=positive,
negative_np=negative,
distance_function=distance_function,
reduction=reduction,)
self.assertTrue(np.allclose(static_result, dy_result))
static_functional = test_static(place=place,
input_np=input,
positive_np=positive,
negative_np=negative,
distance_function=distance_function,
reduction=reduction,
functional=True)
dy_functional = test_dygraph(
place=place,
input=input,
positive=positive,
negative=negative,
distance_function=distance_function,
reduction=reduction,
functional=True)
self.assertTrue(np.allclose(static_functional, dy_functional))

def test_TripletMarginWithDistanceLoss_distance_funtion_error(self):
paddle.disable_static()

def distance_function(x1,x2):
return -1.0 - paddle.nn.functional.cosine_similarity(x1, x2)
func = distance_function
input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
positive = np.random.uniform(0, 2, size=(20, 30)).astype(np.float64)
negative = np.random.uniform(0, 2, size=(20, 30)).astype(np.float64)

self.assertRaises(
ValueError,
paddle.nn.functional.triplet_margin_with_distance_loss,
input=input,
positive=positive,
negative=negative,
distance_function=func,)
paddle.enable_static()

def test_TripletMarginDistanceLoss_dimension(self):
paddle.disable_static()

input = paddle.to_tensor([[0.1, 0.3], [1, 2]], dtype='float32')
positive = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
negative = paddle.to_tensor([[0.2, 0.1]], dtype='float32')
self.assertRaises(
ValueError,
paddle.nn.functional.triplet_margin_with_distance_loss,
input=input,
positive=positive,
negative=negative, )
triplet_margin_with_distance_loss = paddle.nn.loss.TripletMarginWithDistanceLoss()
Contributor:

There is a problem when calling this API:
[screenshot of the error omitted]

Contributor (Author):

I'm not sure what is causing this.

Contributor:

It would be better to use paddle.nn.TripletMarginWithDistanceLoss.
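
That is, the suggestion is to construct the layer via the public path exported in python/paddle/nn/__init__.py below, rather than the internal module path used above (sketch):

# Reviewer's suggestion, i.e. construct the layer via the public API:
#     triplet_margin_with_distance_loss = paddle.nn.TripletMarginWithDistanceLoss()
# rather than the internal module path:
#     triplet_margin_with_distance_loss = paddle.nn.loss.TripletMarginWithDistanceLoss()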

self.assertRaises(
ValueError,
triplet_margin_with_distance_loss,
input=input,
positive=positive,
negative=negative, )
paddle.enable_static()

def test_TripletMarginWithDistanceLoss_swap(self):
reduction = 'mean'
place = paddle.CPUPlace()
input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
positive = np.random.uniform(0, 2, size=(20, 30)).astype(np.float64)
negative = np.random.uniform(0, 2, size=(20, 30)).astype(np.float64)
expected = calc_triplet_margin_distance_loss(input=input, swap=True, positive=positive, negative=negative,
reduction=reduction)

dy_result = test_dygraph(place=place, swap=True,
input=input, positive=positive, negative=negative,
reduction=reduction, )

static_result = test_static(place=place, swap=True,
input_np=input, positive_np=positive, negative_np=negative,
reduction=reduction, )
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
static_functional = test_static(place=place, swap=True,
input_np=input, positive_np=positive, negative_np=negative,
reduction=reduction,
functional=True)
dy_functional = test_dygraph(
place=place, swap=True,
input=input, positive=positive, negative=negative,
reduction=reduction,
functional=True)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))

def test_TripletMarginWithDistanceLoss_margin(self):
paddle.disable_static()

input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
positive = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
negative = paddle.to_tensor([[0.2, 0.1]], dtype='float32')
margin = -0.5
self.assertRaises(
ValueError,
paddle.nn.functional.triplet_margin_with_distance_loss,
margin=margin,
input=input,
positive=positive,
negative=negative, )
paddle.enable_static()


if __name__ == "__main__":
unittest.main()
2 changes: 2 additions & 0 deletions python/paddle/nn/__init__.py
@@ -106,6 +106,7 @@
from .layer.loss import CTCLoss # noqa: F401
from .layer.loss import SmoothL1Loss # noqa: F401
from .layer.loss import HingeEmbeddingLoss # noqa: F401
from .layer.loss import TripletMarginWithDistanceLoss
from .layer.norm import BatchNorm # noqa: F401
from .layer.norm import SyncBatchNorm # noqa: F401
from .layer.norm import GroupNorm # noqa: F401
@@ -313,4 +314,5 @@ def weight_norm(*args):
'MaxUnPool3D',
'HingeEmbeddingLoss',
'Identity',
'TripletMarginWithDistanceLoss'
]
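
For context, a minimal usage sketch of the layer exported above, following the parameter names exercised in the test file (tensor shapes and values are illustrative assumptions, not part of this diff):

import paddle

anchor = paddle.rand([16, 32])
positive = paddle.rand([16, 32])
negative = paddle.rand([16, 32])

# With no distance_function given, the tests compare against an L2-style
# pairwise distance, which appears to be the default behavior.
loss_layer = paddle.nn.TripletMarginWithDistanceLoss(
    margin=0.3, swap=False, reduction='mean')
loss = loss_layer(input=anchor, positive=positive, negative=negative)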
2 changes: 2 additions & 0 deletions python/paddle/nn/functional/__init__.py
@@ -89,6 +89,7 @@
from .loss import square_error_cost # noqa: F401
from .loss import ctc_loss # noqa: F401
from .loss import hinge_embedding_loss # noqa: F401
from .loss import triplet_margin_with_distance_loss
from .norm import batch_norm # noqa: F401
from .norm import instance_norm # noqa: F401
from .norm import layer_norm # noqa: F401
@@ -228,4 +229,5 @@
'class_center_sample',
'sparse_attention',
'fold',
'triplet_margin_with_distance_loss',
]
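
And a corresponding sketch for the functional form registered above, using a custom distance function in the same way as the tests (shapes and the margin value are illustrative):

import paddle
import paddle.nn.functional as F

def cosine_distance(x1, x2):
    # The distance must stay non-negative, per the validation discussed in review.
    return 1.0 - paddle.nn.functional.cosine_similarity(x1, x2)

anchor = paddle.rand([16, 32])
positive = paddle.rand([16, 32])
negative = paddle.rand([16, 32])

loss = F.triplet_margin_with_distance_loss(
    input=anchor,
    positive=positive,
    negative=negative,
    distance_function=cosine_distance,
    margin=0.5,
    swap=True,
    reduction='mean')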