fix place bug, add test case

wanghuancoder committed Nov 19, 2021
1 parent a9893d0 commit fbde0b1
Showing 7 changed files with 99 additions and 17 deletions.
3 changes: 1 addition & 2 deletions paddle/fluid/pybind/eager_functions.cc
@@ -173,8 +173,7 @@ static PyObject* eager_api_to_tensor(PyObject* self, PyObject* args,
PyObject* data = PyTuple_GET_ITEM(args, 0);
auto str_dtype = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
pten::DataType dtype = pten::String2DataType(str_dtype);
-  auto place =
-      ::pybind11::handle(PyTuple_GET_ITEM(args, 2)).cast<platform::Place>();
+  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2);
bool stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
// TODO(jiabin): Support this when python given name
// auto str_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 4), 4);
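The hunk above replaces a direct pybind11 cast to the generic platform::Place with the new CastPyArg2Place helper, so the binding now also accepts the concrete place types. A minimal user-facing sketch of what this fixes, assuming paddle.to_tensor forwards its place argument through this eager binding:

    import paddle

    # Before this fix, passing a concrete place such as CPUPlace to the
    # eager to_tensor path raised a cast error, because the binding only
    # accepted the generic Place type.
    t = paddle.to_tensor([1.0, 2.0], place=paddle.CPUPlace())
    print(t.place)  # a CPU place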
36 changes: 36 additions & 0 deletions paddle/fluid/pybind/eager_utils.cc
@@ -30,6 +30,13 @@ namespace pybind {

extern PyTypeObject* p_eager_tensor_type;

+extern PyTypeObject* g_place_pytype;
+extern PyTypeObject* g_cudaplace_pytype;
+extern PyTypeObject* g_cpuplace_pytype;
+extern PyTypeObject* g_xpuplace_pytype;
+extern PyTypeObject* g_npuplace_pytype;
+extern PyTypeObject* g_cudapinnedplace_pytype;
+
bool PyObject_CheckLongOrConvertToLong(PyObject** obj) {
if ((PyLong_Check(*obj) && !PyBool_Check(*obj))) {
return true;
@@ -187,6 +194,35 @@ std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
return result;
}

+platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
+  platform::Place place;
+  if (PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(g_place_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::Place>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_cudaplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::CUDAPlace>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_cpuplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::CPUPlace>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_xpuplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::XPUPlace>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_npuplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::NPUPlace>();
+  } else if (PyObject_IsInstance(
+                 obj, reinterpret_cast<PyObject*>(g_cudapinnedplace_pytype))) {
+    place = ::pybind11::handle(obj).cast<platform::CUDAPinnedPlace>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "one of(Place,CUDAPlace,CPUPlace,XPUPlace,NPUPlace,CUDAPinnedPlace), "
+        "but got %s",
+        arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+  return place;
+}
+
PyObject* ToPyObject(bool value) {
if (value) {
Py_INCREF(Py_True);
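CastPyArg2Place dispatches on cached PyTypeObject pointers and converts whichever concrete place it receives into the generic platform::Place. An illustrative Python sketch of the same dispatch shape (cast_to_place is hypothetical; core.Place.set_place is assumed from the bindings in pybind.cc):

    from paddle.fluid import core

    def cast_to_place(obj, arg_pos):
        # Accept the generic Place as-is; wrap any known concrete place;
        # reject everything else, mirroring the InvalidArgument error above.
        if isinstance(obj, core.Place):
            return obj
        if isinstance(obj, (core.CUDAPlace, core.CPUPlace, core.XPUPlace,
                            core.NPUPlace, core.CUDAPinnedPlace)):
            place = core.Place()
            place.set_place(obj)  # a generic Place adopting a concrete one
            return place
        raise TypeError(
            "argument (position %d) must be one of (Place, CUDAPlace, "
            "CPUPlace, XPUPlace, NPUPlace, CUDAPinnedPlace), but got %s"
            % (arg_pos + 1, type(obj).__name__))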
1 change: 1 addition & 0 deletions paddle/fluid/pybind/eager_utils.h
@@ -32,6 +32,7 @@ std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos);
egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos);
std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
ssize_t arg_pos);
+platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos);

PyObject* ToPyObject(int value);
PyObject* ToPyObject(bool value);
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/imperative.cc
@@ -874,7 +874,7 @@ void BindImperative(py::module *m_ptr) {

py::class_<imperative::VarBase, std::shared_ptr<imperative::VarBase>> varbase(
m, "VarBase", R"DOC()DOC");
-  g_varbase_pytype = (PyTypeObject *)varbase.ptr(); // NOLINT
+  g_varbase_pytype = reinterpret_cast<PyTypeObject *>(varbase.ptr());
varbase.def_static("_alive_vars", &imperative::VarBase::AliveVarNames)
.def("__init__",
[](imperative::VarBase &self) {
46 changes: 33 additions & 13 deletions paddle/fluid/pybind/pybind.cc
@@ -150,6 +150,14 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::FetchType);

namespace paddle {
namespace pybind {

+PyTypeObject *g_place_pytype = nullptr;
+PyTypeObject *g_cudaplace_pytype = nullptr;
+PyTypeObject *g_cpuplace_pytype = nullptr;
+PyTypeObject *g_xpuplace_pytype = nullptr;
+PyTypeObject *g_npuplace_pytype = nullptr;
+PyTypeObject *g_cudapinnedplace_pytype = nullptr;

bool IsCompiledWithCUDA() {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
return false;
@@ -1600,7 +1608,7 @@ All parameter, weight, gradient are variables in Paddle.
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
#endif
-  py::class_<platform::CUDAPlace>(m, "CUDAPlace", R"DOC(
+  py::class_<platform::CUDAPlace> cudaplace(m, "CUDAPlace", R"DOC(
CUDAPlace is a descriptor of a device.
It represents a GPU device allocated or to be allocated with Tensor or LoDTensor.
@@ -1623,7 +1631,9 @@ All parameter, weight, gradient are variables in Paddle.
place = paddle.CUDAPlace(0)
)DOC")
)DOC");
g_cudaplace_pytype = reinterpret_cast<PyTypeObject *>(cudaplace.ptr());
cudaplace
.def("__init__",
[](platform::CUDAPlace &self, int dev_id) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
@@ -1681,13 +1691,15 @@ All parameter, weight, gradient are variables in Paddle.
.def("__repr__", string::to_string<const platform::CUDAPlace &>)
.def("__str__", string::to_string<const platform::CUDAPlace &>);

-  py::class_<platform::XPUPlace>(m, "XPUPlace", R"DOC(
+  py::class_<platform::XPUPlace> xpuplace(m, "XPUPlace", R"DOC(
**Note**:
Examples:
.. code-block:: python
import paddle.fluid as fluid
xpu_place = fluid.XPUPlace(0)
)DOC")
)DOC");
g_xpuplace_pytype = reinterpret_cast<PyTypeObject *>(xpuplace.ptr());
xpuplace
.def("__init__",
[](platform::XPUPlace &self, int dev_id) {
#ifdef PADDLE_WITH_XPU
@@ -1757,7 +1769,7 @@
});
#endif

-  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace", R"DOC(
+  py::class_<paddle::platform::CPUPlace> cpuplace(m, "CPUPlace", R"DOC(
CPUPlace is a descriptor of a device.
It represents a CPU device on which a tensor will be allocated and a model will run.
@@ -1767,8 +1779,9 @@ All parameter, weight, gradient are variables in Paddle.
import paddle
cpu_place = paddle.CPUPlace()
)DOC")
.def(py::init<>())
)DOC");
g_cpuplace_pytype = reinterpret_cast<PyTypeObject *>(cpuplace.ptr());
cpuplace.def(py::init<>())
.def("_type", &PlaceIndex<platform::CPUPlace>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::Place>)
.def("_equals", &IsSamePlace<platform::CPUPlace, platform::XPUPlace>)
@@ -1780,7 +1793,8 @@ All parameter, weight, gradient are variables in Paddle.
.def("__repr__", string::to_string<const platform::CPUPlace &>)
.def("__str__", string::to_string<const platform::CPUPlace &>);

-  py::class_<paddle::platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace", R"DOC(
+  py::class_<paddle::platform::CUDAPinnedPlace> cudapinnedplace(
+      m, "CUDAPinnedPlace", R"DOC(
CUDAPinnedPlace is a descriptor of a device.
It refers to the page locked memory allocated by the CUDA function `cudaHostAlloc()` in the host memory.
The host operating system will not paging and exchanging the memory.
@@ -1794,7 +1808,10 @@ All parameter, weight, gradient are variables in Paddle.
import paddle
place = paddle.CUDAPinnedPlace()
)DOC")
)DOC");
g_cudapinnedplace_pytype =
reinterpret_cast<PyTypeObject *>(cudapinnedplace.ptr());
cudapinnedplace
.def("__init__",
[](platform::CUDAPinnedPlace &self) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
@@ -1820,7 +1837,7 @@ All parameter, weight, gradient are variables in Paddle.
.def("__str__", string::to_string<const platform::CUDAPinnedPlace &>);

// NPUPlace
-  py::class_<platform::NPUPlace>(m, "NPUPlace", R"DOC(
+  py::class_<platform::NPUPlace> npuplace(m, "NPUPlace", R"DOC(
NPUPlace is a descriptor of a device.
It represents a NPU device on which a tensor will be allocated and a model will run.
@@ -1829,7 +1846,9 @@ All parameter, weight, gradient are variables in Paddle.
import paddle
npu_place = paddle.NPUPlace(0)
)DOC")
)DOC");
g_npuplace_pytype = reinterpret_cast<PyTypeObject *>(npuplace.ptr());
npuplace
.def("__init__",
[](platform::NPUPlace &self, int dev_id) {
#ifdef PADDLE_WITH_ASCEND_CL
@@ -1880,8 +1899,9 @@ All parameter, weight, gradient are variables in Paddle.
[](const platform::NPUPlace &self) { return self.GetDeviceId(); })
.def("__str__", string::to_string<const platform::NPUPlace &>);

-  py::class_<platform::Place>(m, "Place")
-      .def(py::init<>())
+  py::class_<platform::Place> platformplace(m, "Place");
+  g_place_pytype = reinterpret_cast<PyTypeObject *>(platformplace.ptr());
+  platformplace.def(py::init<>())
.def("_type", &PlaceIndex<platform::Place>)
.def("_equals", &IsSamePlace<platform::Place, platform::Place>)
.def("_equals", &IsSamePlace<platform::Place, platform::CUDAPlace>)
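Each place class is now bound through a named py::class_ handle so that its PyTypeObject can be stashed in the matching g_*_pytype global before the methods are attached; those globals are what the PyObject_IsInstance checks in eager_utils.cc consult. From Python the bound classes are ordinary types, which a quick check illustrates (a sketch assuming a CPU build; the core module name varies by build):

    from paddle.fluid import core

    p = core.CPUPlace()
    print(type(p))                       # e.g. <class '...core_avx.CPUPlace'>
    print(isinstance(p, core.CPUPlace))  # True: the same type object the
                                         # C++ side cached at registration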
2 changes: 1 addition & 1 deletion paddle/pten/core/convert_utils.cc
@@ -268,7 +268,7 @@ int TensorDtype2NumpyDtype(pten::DataType dtype) {
case pten::DataType::INT64:
return NPY_TYPES::NPY_INT64;
case pten::DataType::FLOAT16:
-      return NPY_TYPES::NPY_FLOAT; // numpy not have float16
+      return NPY_TYPES::NPY_FLOAT16;
case pten::DataType::FLOAT32:
return NPY_TYPES::NPY_FLOAT;
case pten::DataType::FLOAT64:
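The deleted comment was mistaken: numpy has had a float16 type (NPY_FLOAT16, an alias of NPY_HALF) since numpy 1.6, so FLOAT16 tensors no longer need to widen to float32 on conversion. A short round-trip sketch under the eager mode this commit targets, mirroring the new test below:

    import numpy as np
    import paddle

    arr = np.random.random([4, 16]).astype(np.float16)
    t = paddle.to_tensor(arr, 'float16')
    # With the NPY_FLOAT16 mapping the dtype survives the trip back to
    # numpy; previously the array came back as float32.
    assert t.numpy().dtype == np.float16
    assert np.array_equal(arr, t.numpy())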
26 changes: 26 additions & 0 deletions python/paddle/fluid/tests/unittests/test_egr_python_api.py
@@ -45,3 +45,29 @@ def test_scale_base(self):
tensor.persistable = False
self.assertEqual(tensor.persistable, False)
self.assertTrue(tensor.place.is_cpu_place())


+class EagerDtypeTestCase(unittest.TestCase):
+    def check_to_tensor_and_numpy(self, dtype):
+        with eager_guard():
+            arr = np.random.random([4, 16, 16, 32]).astype(dtype)
+            tensor = paddle.to_tensor(arr, dtype)
+            self.assertEqual(tensor.dtype, dtype)
+            self.assertTrue(np.array_equal(arr, tensor.numpy()))
+
+    def test_dtype_base(self):
+        self.check_to_tensor_and_numpy('bool')
+        self.check_to_tensor_and_numpy('int8')
+        self.check_to_tensor_and_numpy('uint8')
+        self.check_to_tensor_and_numpy('int16')
+        self.check_to_tensor_and_numpy('int32')
+        self.check_to_tensor_and_numpy('int64')
+        self.check_to_tensor_and_numpy('float16')
+        self.check_to_tensor_and_numpy('float32')
+        self.check_to_tensor_and_numpy('float64')
+        self.check_to_tensor_and_numpy('complex64')
+        self.check_to_tensor_and_numpy('complex128')
+
+
+if __name__ == "__main__":
+    unittest.main()
