def __dlpack__(self, stream=None):
    """
    Export the tensor as a DLPack capsule so other libraries can consume it.

    Args:
        stream (int | None): Optional consumer CUDA stream, encoded as a
            Python integer holding a raw stream pointer.
            - ``None`` or ``-1``: no synchronization is performed.
            - ``0``: the consumer is taken to use the default stream.
            - any other int: wrapped as an external CUDA stream handle.
            When synchronization applies and the consumer stream differs from
            the current (producer) stream, an event recorded on the producer
            stream is waited on by the consumer stream, so queued writes to
            this tensor complete before the consumer reads it.

    Returns:
        A DLPack capsule wrapping this tensor's device memory (no copy).

    Raises:
        AttributeError: If the tensor is not on a CUDA device, or is sparse.
        RuntimeError: If the tensor requires gradients (``stop_gradient`` is
            ``False``).
        TypeError: If ``stream`` is neither an ``int`` nor ``None``.
    """
    # NOTE(review): the substring test (rather than place.is_gpu_place())
    # also admits gpu-pinned places -- confirm that is intended.
    if "gpu" not in str(self.place):
        raise AttributeError(
            "Can't get __dlpack__ on non-CUDA tensor. "
            "Use tensor.cuda() to move the tensor to device memory."
        )

    if self.is_sparse():
        raise AttributeError(
            "Can't get __dlpack__ on sparse tensor. "
            "Use Tensor.to_dense() to convert to a dense tensor first."
        )

    if not self.stop_gradient:
        raise RuntimeError(
            "Can't get __dlpack__ on Tensor that requires gradients. "
            "If gradients aren't required, use tensor.detach() to get a tensor without gradient."
        )

    if stream is not None and not isinstance(stream, int):
        raise TypeError("stream must be an integer or None")

    # A caller passing None or -1 has explicitly opted out of syncing.
    if stream is not None and stream != -1:
        if self.place.is_gpu_place():
            if stream == 0:
                consumer_stream = paddle.device.cuda.default_stream()
            else:
                consumer_stream = paddle.device.cuda.ExternalStream(stream)
            producer_stream = paddle.device.cuda.current_stream()
            if consumer_stream != producer_stream:
                # Standard DLPack producer handshake: the consumer stream
                # waits on an event recorded after all work queued so far
                # on the producer stream.
                sync_event = paddle.device.cuda.Event()
                sync_event.record(producer_stream)
                consumer_stream.wait_event(sync_event)

    return paddle.utils.dlpack.to_dlpack(self)
TestRaiseError(unittest.TestCase): def test_to_dlpack_raise_type_error(self): diff --git a/third_party/pybind b/third_party/pybind index a2e59f0e706540..3e9dfa28669416 160000 --- a/third_party/pybind +++ b/third_party/pybind @@ -1 +1 @@ -Subproject commit a2e59f0e7065404b44dfe92a28aca47ba1378dc4 +Subproject commit 3e9dfa2866941655c56877882565e7577de6fc7b diff --git a/tools/enforce/grep_invalid_enforce.sh b/tools/enforce/grep_invalid_enforce.sh index 04243bfb9afaf0..9e653c6b90fcc4 100644 --- a/tools/enforce/grep_invalid_enforce.sh +++ b/tools/enforce/grep_invalid_enforce.sh @@ -17,14 +17,14 @@ # This script is used to grep invalid PADDLE checks by directory or file in the paddle/fluid/, # the result show all invalid PADDLE checks in specified directory or file. -# Usage: +# Usage: # - bash grep_invalid_enforce.sh [target directory or file] (run in tools directory) # - The default check path is paddle/fluid/operators # Result Examples: # 1. grep invalid PADDLE checks in directory -# - Command: /work/paddle/tools {develop} bash grep_invalid_enforce.sh ../paddle/fluid/imperative +# - Command: /work/paddle/tools {develop} bash grep_invalid_enforce.sh ../paddle/fluid/imperative # - Results: # - paddle/fluid/imperative/gradient_accumulator.cc # PADDLE_ENFORCE_EQ(dst_tensor->numel() == numel, true, @@ -60,7 +60,7 @@ # "Place cannot be CUDAPlace when use_double_buffer is False"); # PADDLE_ENFORCE_NOT_NULL(exceptions_[i]); # PADDLE_ENFORCE_EQ(status, Status::kException); -# PADDLE_ENFORCE_EQ(status, Status::kSuccess); +# PADDLE_ENFORCE_EQ(status, Status::kSuccess); . ./count_enforce_by_file.sh --source-only