Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 26 additions & 0 deletions paddle/fluid/framework/tensor_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -865,6 +865,32 @@ void DeleterBridge(phi::Allocation* alloc) {
}
}

// Maps a NumPy-style "typestr" (as used by __array_interface__ and
// __cuda_array_interface__, e.g. "<f4") to the matching Paddle data type.
// Spec: https://numpy.org/doc/stable/reference/arrays.interface.html
//
// Raises InvalidArgument for any typestr that has no Paddle equivalent.
phi::DataType ConvertToPDDataType(const std::string& typestr) {
  static const std::unordered_map<std::string, phi::DataType> type_map = {
      {"<c8", phi::DataType::COMPLEX64},
      {"<c16", phi::DataType::COMPLEX128},
      // BUGFIX: "<f2" is IEEE-754 half precision (float16) in the array
      // interface protocol, not bfloat16; bfloat16 has no standard typestr.
      {"<f2", phi::DataType::FLOAT16},
      {"<f4", phi::DataType::FLOAT32},
      {"<f8", phi::DataType::FLOAT64},
      {"|u1", phi::DataType::UINT8},
      {"|i1", phi::DataType::INT8},
      {"<i2", phi::DataType::INT16},
      {"<i4", phi::DataType::INT32},
      {"<i8", phi::DataType::INT64},
      {"|b1", phi::DataType::BOOL},
      // NOTE: Paddle does not support uint16, uint32, uint64 yet.
      // {"<u2", phi::DataType::UINT16},
      // {"<u4", phi::DataType::UINT32},
      // {"<u8", phi::DataType::UINT64},
  };
  auto it = type_map.find(typestr);
  PADDLE_ENFORCE_NE(
      it,
      type_map.end(),
      common::errors::InvalidArgument("Unsupported typestr: " + typestr));
  return it->second;
}

phi::DenseTensor from_blob(void* data,
DLManagedTensor* src,
const phi::DDim& shape,
Expand Down
2 changes: 2 additions & 0 deletions paddle/fluid/framework/tensor_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@ class PrintOptions {
PrintOptions() {}
};

phi::DataType ConvertToPDDataType(const std::string& typestr);

TEST_API void TensorToStream(std::ostream& os,
const phi::DenseTensor& tensor,
const phi::DeviceContext& dev_ctx);
Expand Down
98 changes: 98 additions & 0 deletions paddle/fluid/pybind/pybind.cc
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ limitations under the License. */
#include <utility>
#include <vector>

#include "paddle/common/ddim.h"
#include "paddle/fluid/framework/compiled_program.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/custom_operator.h"
Expand Down Expand Up @@ -76,9 +77,11 @@ limitations under the License. */
#include "paddle/fluid/prim/utils/utils.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/framework/reader.h"
#include "paddle/phi/core/memory/allocation/allocator_strategy.h"
#include "paddle/phi/core/raw_tensor.h"
#include "paddle/phi/core/tensor_meta.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/phi/core/memory/allocation/auto_growth_best_fit_allocator_v2.h"
#include "paddle/phi/core/memory/allocation/cuda_ipc_allocator.h"
Expand Down Expand Up @@ -1263,6 +1266,101 @@ PYBIND11_MODULE(libpaddle, m) {
return ptensor;
});

m.def("tensor_from_cuda_array_interface", [](py::object obj) {
  // Builds a paddle Tensor that aliases (does NOT copy) device memory
  // described by the CUDA Array Interface (Version 2) protocol:
  // https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html
  py::object cuda_array_interface = obj.attr("__cuda_array_interface__");
  PADDLE_ENFORCE_EQ(py::isinstance<py::dict>(cuda_array_interface),
                    true,
                    common::errors::InvalidArgument(
                        "`__cuda_array_interface__` must be a dict"));
  py::dict cuda_dict = cuda_array_interface.cast<py::dict>();

  // 'shape': tuple/list of ints giving the extent of each dimension.
  PADDLE_ENFORCE_EQ(
      cuda_dict.contains("shape"),
      true,
      common::errors::InvalidArgument(
          "The 'shape' key is missing in the __cuda_array_interface__ "
          "dict."));
  py::object shape_obj = cuda_dict["shape"];
  PADDLE_ENFORCE_EQ(
      py::isinstance<py::tuple>(shape_obj) ||
          py::isinstance<py::list>(shape_obj),
      true,
      common::errors::InvalidArgument("Shape must be a tuple or list"));
  std::vector<int64_t> shapes = shape_obj.cast<std::vector<int64_t>>();
  phi::IntArray shapeIntArray = phi::IntArray(shapes);

  // 'typestr': NumPy-style dtype string, e.g. "<f4".
  PADDLE_ENFORCE_EQ(
      cuda_dict.contains("typestr"),
      true,
      common::errors::InvalidArgument(
          "The 'typestr' key is missing in the __cuda_array_interface__ "
          "dict."));
  std::string typestr = cuda_dict["typestr"].cast<std::string>();
  phi::DataType dtype = paddle::framework::ConvertToPDDataType(typestr);

  // 'data': tuple (ptr_as_int, read_only_flag). The pointer arrives as a
  // Python integer, so round-trip it through intptr_t before casting to
  // void*.
  PADDLE_ENFORCE_EQ(
      cuda_dict.contains("data"),
      true,
      common::errors::InvalidArgument(
          "The 'data' key is missing in the __cuda_array_interface__ "
          "dict."));
  py::tuple data_tuple = cuda_dict["data"].cast<py::tuple>();
  void *data_ptr = reinterpret_cast<void *>(data_tuple[0].cast<intptr_t>());
  PADDLE_ENFORCE_NE(
      data_tuple[1].cast<bool>(),
      true,
      common::errors::InvalidArgument("Read-only array is not supported"));

  // 'strides': optional; when present it is given in BYTES, while Paddle
  // expects element counts, so divide each entry by the element size.
  phi::IntArray stridesIntArray;
  if (cuda_dict.contains("strides") && !cuda_dict["strides"].is_none()) {
    std::vector<int64_t> strides_vec =
        cuda_dict["strides"].cast<std::vector<int64_t>>();
    const auto element_size = static_cast<int64_t>(phi::SizeOf(dtype));
    for (auto &stride : strides_vec) {
      // BUGFIX: valid strides are exact multiples of the element size, so
      // the remainder must be enforced EQUAL to zero (the previous
      // PADDLE_ENFORCE_NE rejected every valid stride).
      PADDLE_ENFORCE_EQ(
          stride % element_size,
          0,
          common::errors::InvalidArgument(
              "strides must be a multiple of the element size."));
      stride /= element_size;
    }
    stridesIntArray = phi::IntArray(strides_vec);
  } else {
    // No strides given: fall back to the default contiguous layout
    // computed from the shape.
    DDim ddim_strides =
        phi::DenseTensorMeta::calc_strides(common::make_ddim(shapes));
    int rank = ddim_strides.size();
    const int64_t *ddim_data = ddim_strides.Get();
    std::vector<int64_t> strides_vec(ddim_data, ddim_data + rank);
    stridesIntArray = phi::IntArray(strides_vec);
  }

  // Keep the producer alive for as long as Paddle aliases its memory.
  // BUGFIX: the deleter's explicit obj.dec_ref() needs a matching
  // inc_ref() here — the reference added by the by-value capture is
  // already released by the closure's own py::object destructor, so
  // without this inc_ref the object would be over-released.
  // NOTE(review): this mirrors the Py_INCREF/Py_DECREF pairing PyTorch
  // uses for the same protocol; confirm from_blob destroys the deleter
  // exactly once.
  obj.inc_ref();
  return paddle::from_blob(data_ptr,
                           shapeIntArray,
                           stridesIntArray,
                           dtype,
                           phi::DataLayout::NCHW,
                           phi::Place(),
                           [obj](void *data) {
                             // The GIL must be held before touching
                             // Python refcounts from the deleter thread.
                             py::gil_scoped_acquire gil;
                             obj.dec_ref();
                           });
});

m.def("_create_loaded_parameter",
[](const py::handle &vec_var_list,
const Scope &scope,
Expand Down
13 changes: 13 additions & 0 deletions python/paddle/tensor/creation.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

import math
import re
import warnings
from typing import TYPE_CHECKING, Any, overload

import numpy as np
Expand Down Expand Up @@ -931,6 +932,18 @@ def to_tensor(
if place is None:
place = _current_expected_place_()
if in_dynamic_mode():
is_tensor = paddle.is_tensor(data)
if not is_tensor and hasattr(data, "__cuda_array_interface__"):
if not core.is_compiled_with_cuda():
raise RuntimeError(
"PaddlePaddle is not compiled with CUDA, but trying to create a Tensor from a CUDA array."
)
return core.tensor_from_cuda_array_interface(data)
if is_tensor:
warnings.warn(
"To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach(), "
"rather than paddle.to_tensor(sourceTensor)."
)
return _to_tensor_non_static(data, dtype, place, stop_gradient)

# call assign for static graph
Expand Down
18 changes: 18 additions & 0 deletions test/legacy_test/test_eager_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
import copy
import itertools
import unittest
import warnings

import numpy as np
from utils import dygraph_guard
Expand Down Expand Up @@ -1293,6 +1294,23 @@ def test___cuda_array_interface__(self):
self.assertIn("version", interface)
self.assertEqual(interface["version"], 2)

def test_to_tensor_from___cuda_array_interface__(self):
    # Only the copy-construct warning is verified here: CUDA tensors from
    # other frameworks cannot be created in Paddle's test environment.
    # Fuller coverage is referenced in:
    # https://github.com/PaddlePaddle/Paddle/pull/69913
    expected_msg = (
        "To copy construct from a tensor, it is recommended to use "
        "sourceTensor.clone().detach(), rather than "
        "paddle.to_tensor(sourceTensor)."
    )
    with dygraph_guard():
        with warnings.catch_warnings(record=True) as caught:
            source = paddle.to_tensor([1, 2, 3])
            paddle.to_tensor(source)
            self.assertTrue(
                any(
                    issubclass(item.category, UserWarning)
                    and expected_msg in str(item.message)
                    for item in caught
                )
            )

def test_dlpack_device(self):
"""test Tensor.__dlpack_device__"""
with dygraph_guard():
Expand Down