Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【SCU】【Paddle Tensor No.25】新增paddle.vecdot , paddle.linalg.vecdot #69477

Merged
merged 33 commits into from
Nov 27, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
9acb5b0
add_vecdot
PolaKuma Nov 18, 2024
74c2c27
update vecdot
PolaKuma Nov 18, 2024
7888577
fix codestyle
PolaKuma Nov 18, 2024
7b1e0dd
fix codestyle
PolaKuma Nov 19, 2024
f714d88
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 19, 2024
ebd6c02
Update test/legacy_test/test_linalg_vecdot.py
PolaKuma Nov 19, 2024
4193810
Update python/paddle/tensor/linalg.py
PolaKuma Nov 19, 2024
abffd22
update_vecdot
PolaKuma Nov 19, 2024
ef6edd2
fix_codestyle
PolaKuma Nov 19, 2024
2696682
fix codestyle
PolaKuma Nov 19, 2024
fe9ec10
fix codestyle
PolaKuma Nov 19, 2024
e77b6ca
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 19, 2024
9e1be50
update
PolaKuma Nov 20, 2024
b851f62
Merge branch 'develop' into add_vecdot
PolaKuma Nov 20, 2024
12c2e11
skip_xpu
PolaKuma Nov 21, 2024
64a53fe
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 21, 2024
31f669f
Update test_linalg_vecdot.py
PolaKuma Nov 21, 2024
0c014c7
fix codestyle
PolaKuma Nov 21, 2024
1a63df0
fix codestyle again
PolaKuma Nov 21, 2024
b67324c
Merge branch 'develop' into add_vecdot
PolaKuma Nov 21, 2024
a970f65
fix
PolaKuma Nov 22, 2024
3034ea1
Merge branch 'add_vecdot' of https://github.com/PolaKuma/Paddle into …
PolaKuma Nov 22, 2024
77744cb
fix
PolaKuma Nov 22, 2024
50b557d
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 22, 2024
e299388
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 25, 2024
06793f5
fix
PolaKuma Nov 25, 2024
18c5ed5
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 25, 2024
314606a
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 26, 2024
0d65079
fix again
PolaKuma Nov 26, 2024
254eb8d
Merge branch 'add_vecdot' of https://github.com/PolaKuma/Paddle into …
PolaKuma Nov 26, 2024
9e90b92
change_example
PolaKuma Nov 26, 2024
0690cd1
delete
PolaKuma Nov 26, 2024
dc52bc3
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 26, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions python/paddle/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,6 +240,7 @@
t_,
transpose,
transpose_,
vecdot,
)
from .tensor.logic import (
allclose,
Expand Down Expand Up @@ -1204,4 +1205,5 @@
'positive',
'from_dlpack',
'to_dlpack',
'vecdot',
]
2 changes: 2 additions & 0 deletions python/paddle/linalg.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@
svd_lowrank,
triangular_solve,
vector_norm,
vecdot,
)

__all__ = [
Expand All @@ -55,6 +56,7 @@
'norm',
'matrix_norm',
'vector_norm',
'vecdot',
'cond',
'cov',
'corrcoef',
Expand Down
1 change: 1 addition & 0 deletions python/paddle/tensor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@
t_,
transpose,
transpose_,
vecdot,
)
from .logic import ( # noqa: F401
allclose,
Expand Down
42 changes: 42 additions & 0 deletions python/paddle/tensor/linalg.py
Original file line number Diff line number Diff line change
Expand Up @@ -1867,6 +1867,48 @@ def dot(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
return out


def vecdot(
    x: Tensor, y: Tensor, axis: int = -1, name: str | None = None
) -> Tensor:
    """
    Compute the (batched) vector dot product of ``x`` and ``y`` along ``axis``.

    Following the array API specification for ``vecdot``, this is an
    elementwise multiplication — with the first operand conjugated for
    complex inputs — followed by a summation over ``axis``. The inputs are
    broadcast against each other before the reduction, so arbitrary batch
    dimensions are supported (not just 1-D/2-D).

    Parameters:
        x (Tensor): first input Tensor. Its dtype should be ``float32``,
            ``float64``, ``int32``, ``int64``, ``complex64``, ``complex128``.
        y (Tensor): second input Tensor, broadcastable against ``x``, with
            a dtype from the same set as ``x``.
        axis (int, optional): axis along which the dot product is taken.
            Default is ``-1`` (the last axis).
        name (str|None, optional): Name of the output. Default is None.
            It's used to print debug info for developers.
            Details: :ref:`api_guide_Name`

    Returns:
        Tensor: the dot product of ``x`` and ``y``, reduced over ``axis``.

    Examples:

        .. code-block:: python

            >>> import paddle

            >>> # 1-D Tensor * 1-D Tensor
            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.to_tensor([4, 5, 6])
            >>> z = paddle.linalg.vecdot(x, y)
            >>> print(z)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            32)

            >>> # 2-D Tensor * 2-D Tensor (reduced over the last axis)
            >>> x = paddle.to_tensor([[1, 2, 3], [2, 4, 6]])
            >>> y = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
            >>> z = paddle.linalg.vecdot(x, y)
            >>> print(z)
            Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
            [32, 64])

    """
    # Array-API semantics: conj(x) * y, summed over `axis`, with
    # broadcasting — NOT a plain delegation to `dot`, which only handles
    # 1-D/2-D inputs and neither conjugates nor accepts an axis.
    # `conj()` is a no-op for real dtypes, so real-valued behavior is
    # unchanged.
    out = (x.conj() * y).sum(axis=axis)
    return out
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

参考API规范定义,vecdot应该是逐元素乘法后再在axis轴上求和:https://data-apis.org/array-api/latest/API_specification/generated/array_api.vecdot.html#array_api.vecdot
所以可以参考如下实现(复用乘法和sum)
https://github.com/pytorch/pytorch/blob/main/torch/_refs/linalg/__init__.py#L307

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

omg sry,已修改,谢谢佬更正!



def cov(
x: Tensor,
rowvar: bool = True,
Expand Down
116 changes: 116 additions & 0 deletions test/legacy_test/test_dot_op.py
Copy link
Contributor

@HydrogenSulfate HydrogenSulfate Nov 18, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. 这是新增的API,不是OP,参考这几个文件写一下单测
    image

  2. 文件名建议改成test_linalg_vecdot.py

Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,122 @@ def init_dtype(self):
self.dtype = np.float64


class VecDotOp(OpTest):
    """OpTest exercising ``paddle.vecdot`` through the ``dot`` kernel.

    ``op_type`` is ``"dot"`` while ``python_api`` points at
    ``paddle.vecdot``, so the forward/backward checks validate the vecdot
    Python API against the dot kernel for 1-D inputs.
    """

    def setUp(self):
        # Kernel under test and the Python API it maps to.
        self.op_type = "dot"
        self.prim_op_type = "prim"
        self.python_api = paddle.vecdot
        self.public_python_api = paddle.vecdot
        self.init_dtype()
        self.init_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_base_dtype(self.x),
            'Y': OpTest.np_dtype_to_base_dtype(self.y),
        }
        self.outputs = {'Out': self.out}
        self.attrs = {}

    def test_check_output(self):
        self.check_output(check_pir=True)

    def _run_grad_check(
        self, inputs_to_check, user_defined_grads, no_grad_set=None
    ):
        """Shared driver for the three gradient checks below.

        On ROCm the analytic gradients are supplied explicitly via
        ``user_defined_grads``; elsewhere numeric checking is used, with
        prim-PIR checking additionally enabled for non-complex dtypes
        (mirroring the per-branch behavior this helper replaces).
        """
        if core.is_compiled_with_rocm():
            self.check_grad(
                inputs_to_check,
                'Out',
                no_grad_set=no_grad_set,
                user_defined_grads=user_defined_grads,
                check_pir=True,
            )
        elif self.dtype in (np.complex64, np.complex128):
            self.check_grad(
                inputs_to_check,
                'Out',
                no_grad_set=no_grad_set,
                check_pir=True,
            )
        else:
            self.check_grad(
                inputs_to_check,
                'Out',
                no_grad_set=no_grad_set,
                check_pir=True,
                check_prim_pir=True,
            )

    def test_check_grad_normal(self):
        # For out = x . y: d(out)/dX = Y and d(out)/dY = X.
        self._run_grad_check(
            ['X', 'Y'], [self.inputs['Y'], self.inputs['X']]
        )

    def test_check_grad_ignore_x(self):
        self._run_grad_check(['Y'], [self.inputs['X']], no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        self._run_grad_check(['X'], [self.inputs['Y']], no_grad_set=set('Y'))

    def init_input_output(self):
        # 1-D vectors of length 121; reference output computed with numpy.
        self.x = np.random.uniform(0.1, 1, [121]).astype(self.dtype)
        self.y = np.random.uniform(1, 3, [121]).astype(self.dtype)
        self.out = np.dot(self.x, self.y).astype(self.dtype)

    def init_dtype(self):
        self.dtype = np.float64


class DotOpBatch(DotOp):
def init_input_output(self):
self.x = (
Expand Down