-
Notifications
You must be signed in to change notification settings - Fork 5.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Eager] publish python c api for eager #37550
Changes from all commits
3847c76
a11334b
fa2bf36
3b4356b
cf6cceb
35c5d0a
6352ae1
a5b4261
0732c3f
e121ff8
5cfa2e3
ca2a4af
79d1611
5cb333a
0807b22
56a25f6
d2144f3
24ebe93
8d7d916
c14d512
f4fc5d2
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,132 @@ | ||
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. | ||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
http://www.apache.org/licenses/LICENSE-2.0 | ||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. */ | ||
// disable numpy compile error | ||
#include <Python.h> | ||
|
||
#include <string> | ||
#include <vector> | ||
|
||
#include "paddle/fluid/eager/api/all.h" | ||
#include "paddle/fluid/eager/autograd_meta.h" | ||
#include "paddle/fluid/eager/utils.h" | ||
#include "paddle/fluid/memory/allocation/allocator.h" | ||
#include "paddle/fluid/memory/memcpy.h" | ||
#include "paddle/fluid/platform/enforce.h" | ||
#include "paddle/fluid/pybind/eager.h" | ||
#include "paddle/fluid/pybind/eager_utils.h" | ||
#include "paddle/pten/common/data_type.h" | ||
#include "paddle/pten/core/convert_utils.h" | ||
#include "paddle/pten/core/dense_tensor.h" | ||
#include "paddle/pten/include/core.h" | ||
#pragma GCC diagnostic ignored "-Wmissing-field-initializers" | ||
|
||
namespace paddle { | ||
namespace pybind { | ||
|
||
namespace py = ::pybind11; | ||
|
||
PyTypeObject* p_eager_tensor_type; | ||
|
||
// tp_new slot for EagerTensor: allocate the Python object, then
// placement-new the embedded egr::EagerTensor so its C++ constructor runs.
PyObject* eagertensor_new(PyTypeObject* type, PyObject* args,
                          PyObject* kwargs) {
  PyObject* self = type->tp_alloc(type, 0);
  if (self == nullptr) {
    // tp_alloc already set a Python exception; propagate the failure.
    return nullptr;
  }
  auto* tensor_obj = reinterpret_cast<EagerTensorObject*>(self);
  new (&tensor_obj->eagertensor) egr::EagerTensor();
  return self;
}
|
||
// tp_dealloc slot: manually invoke the destructor of the placement-new'ed
// egr::EagerTensor (mirror of eagertensor_new), then free the Python object.
static void eagertensor_dealloc(EagerTensorObject* self) {
  PyObject* py_self = reinterpret_cast<PyObject*>(self);
  self->eagertensor.~EagerTensor();
  Py_TYPE(py_self)->tp_free(py_self);
}
|
||
extern struct PyGetSetDef variable_properties[]; | ||
|
||
extern PyMethodDef variable_methods[]; | ||
|
||
// Statically allocated Python type backing paddle's EagerTensor.
// NOTE(fix): Py_TPFLAGS_HEAPTYPE must NOT be set on a statically allocated
// PyTypeObject. That flag tells CPython the struct is a PyHeapTypeObject
// (with ht_name/ht_qualname/etc. stored after it) and that the type object
// itself is reference-counted and deallocatable — reading those fields on a
// static type is undefined behavior. Heap-type flag is only set
// automatically by PyType_FromSpec / class statements.
PyTypeObject eager_tensor_type = {
    PyVarObject_HEAD_INIT(NULL, 0) "core_avx.eager.EagerTensor", /* tp_name */
    sizeof(EagerTensorObject),       /* tp_basicsize */
    0,                               /* tp_itemsize */
    (destructor)eagertensor_dealloc, /* tp_dealloc */
    0,                               /* tp_vectorcall_offset */
    0,                               /* tp_getattr */
    0,                               /* tp_setattr */
    0,                               /* tp_reserved */
    0,                               /* tp_repr */
    0,                               /* tp_as_number */
    0,                               /* tp_as_sequence */
    0,                               /* tp_as_mapping */
    0,                               /* tp_hash */
    0,                               /* tp_call */
    0,                               /* tp_str */
    0,                               /* tp_getattro */
    0,                               /* tp_setattro */
    0,                               /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
    0,                               /* tp_doc */
    0,                               /* tp_traverse */
    0,                               /* tp_clear */
    0,                               /* tp_richcompare */
    0,                               /* tp_weaklistoffset */
    0,                               /* tp_iter */
    0,                               /* tp_iternext */
    variable_methods,                /* tp_methods */
    0,                               /* tp_members */
    variable_properties,             /* tp_getset */
    0,                               /* tp_base */
    0,                               /* tp_dict */
    0,                               /* tp_descr_get */
    0,                               /* tp_descr_set */
    0,                               /* tp_dictoffset */
    0,                               /* tp_init */
    0,                               /* tp_alloc */
    eagertensor_new,                 /* tp_new */
    0,                               /* tp_free */
    0,                               /* tp_is_gc */
    0,                               /* tp_bases */
    0,                               /* tp_mro */
    0,                               /* tp_cache */
    0,                               /* tp_subclasses */
    0,                               /* tp_weaklist */
    0,                               /* tp_del */
    0,                               /* tp_version_tag */
    0                                /* tp_finalize */
};
|
||
// Creates the "eager" submodule on `module` and exposes the EagerTensor
// type plus the eager-mode functions on it. Must run once during
// interpreter/core initialization; throws a Fatal error on failure.
void BindEager(pybind11::module* module) {
  auto m = module->def_submodule("eager");

  p_eager_tensor_type = &eager_tensor_type;
  if (PyType_Ready(&eager_tensor_type) < 0) {
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindEager(PyType_Ready)."));
    return;
  }

  // PyModule_AddObject steals a reference on success, so take one first.
  Py_INCREF(&eager_tensor_type);
  if (PyModule_AddObject(m.ptr(), "EagerTensor",
                         reinterpret_cast<PyObject*>(&eager_tensor_type)) <
      0) {
    // Undo the INCREF taken above. NOTE(fix): do NOT Py_DECREF(m.ptr())
    // here — the pybind11 handle `m` owns its own reference and will
    // release it on destruction; a manual decref over-releases the module.
    Py_DECREF(&eager_tensor_type);
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindEager(PyModule_AddObject)."));
    return;
  }

  BindFunctions(m.ptr());
}
|
||
} // namespace pybind | ||
} // namespace paddle |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,24 @@ | ||
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. | ||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
http://www.apache.org/licenses/LICENSE-2.0 | ||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. */ | ||
#pragma once | ||
|
||
#include <Python.h> | ||
#include "pybind11/pybind11.h" | ||
#include "pybind11/stl.h" | ||
|
||
namespace paddle { | ||
namespace pybind { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. same namespace issue. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 同上 |
||
|
||
void BindEager(pybind11::module* m); | ||
void BindFunctions(PyObject* module); | ||
|
||
} // namespace pybind | ||
} // namespace paddle |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,223 @@ | ||
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. | ||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
http://www.apache.org/licenses/LICENSE-2.0 | ||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. */ | ||
// disable numpy compile error | ||
#include <Python.h> | ||
|
||
#include <string> | ||
#include <vector> | ||
|
||
#include "pybind11/numpy.h" | ||
#include "pybind11/pybind11.h" | ||
|
||
#include "paddle/fluid/eager/accumulation/accumulation_node.h" | ||
#include "paddle/fluid/eager/api/all.h" | ||
#include "paddle/fluid/eager/autograd_meta.h" | ||
#include "paddle/fluid/eager/backward.h" | ||
#include "paddle/fluid/eager/utils.h" | ||
#include "paddle/fluid/memory/allocation/allocator.h" | ||
#include "paddle/fluid/memory/memcpy.h" | ||
#include "paddle/fluid/platform/enforce.h" | ||
#include "paddle/fluid/pybind/eager.h" | ||
#include "paddle/fluid/pybind/eager_utils.h" | ||
#include "paddle/fluid/pybind/exception.h" | ||
#include "paddle/pten/api/lib/utils/allocator.h" | ||
#include "paddle/pten/api/lib/utils/storage.h" | ||
#include "paddle/pten/api/lib/utils/tensor_utils.h" | ||
#include "paddle/pten/common/data_type.h" | ||
#include "paddle/pten/core/convert_utils.h" | ||
#include "paddle/pten/core/dense_tensor.h" | ||
#include "paddle/pten/include/core.h" | ||
|
||
namespace paddle { | ||
namespace pybind { | ||
|
||
namespace py = ::pybind11; | ||
|
||
extern PyTypeObject* p_eager_tensor_type; | ||
|
||
// Returns the total element count of a numpy ndarray (product of all
// dimension extents), read directly from the numpy C array proxy.
size_t PyArray_Size_(PyObject* numpy_data) {
  auto* proxy = pybind11::detail::array_proxy(numpy_data);
  size_t numel = 1;
  for (int i = 0; i < proxy->nd; ++i) {
    numel *= static_cast<size_t>(proxy->dimensions[i]);
  }
  return numel;
}
|
||
// An Allocation that aliases (zero-copy) the data buffer of a numpy
// ndarray. It holds a strong reference to the array so the buffer stays
// alive for the lifetime of the allocation; the reference is dropped under
// the GIL in the destructor.
class EagerNumpyAllocation : public paddle::memory::allocation::Allocation {
 public:
  // `numpy_data` must be a non-null, non-None numpy array object.
  // `dtype` is used only to compute the byte size of the aliased buffer
  // (element size * element count).
  explicit EagerNumpyAllocation(PyObject* numpy_data, pten::DataType dtype)
      : Allocation(
            static_cast<void*>(pybind11::detail::array_proxy(numpy_data)->data),
            pten::DataTypeSize(dtype) * PyArray_Size_(numpy_data),
            paddle::platform::CPUPlace()),
        arr_(numpy_data) {
    PADDLE_ENFORCE_NOT_NULL(arr_, platform::errors::InvalidArgument(
                                      "The underlying PyObject pointer of "
                                      "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_, Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    // Keep the numpy array alive as long as this allocation exists.
    Py_INCREF(arr_);
  }
  ~EagerNumpyAllocation() override {
    // The tensor holding this allocation may be destroyed on a thread that
    // does not hold the GIL, so acquire it before touching the refcount.
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject* arr_;  // owned reference (taken via Py_INCREF in the ctor)
};
|
||
// Python binding: _set_expected_place(place) -> None.
// Records the given place on the global eager Controller singleton.
static PyObject* eager_api_set_expected_place(PyObject* self, PyObject* args,
                                              PyObject* kwargs) {
  EAGER_TRY
  auto expected_place = CastPyArg2Place(PyTuple_GET_ITEM(args, 0), 0);
  egr::Controller::Instance().SetExpectedPlace(expected_place);

  // Py_RETURN_NONE == Py_INCREF(Py_None); return Py_None;
  Py_RETURN_NONE;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
|
||
// Python binding: thin wrapper over egr::scale that unpacks five
// positional arguments (tensor, two floats, two booleans).
// NOTE(review): local names below assume the egr::scale parameter order is
// (tensor, scale, bias, bias_after_scale, trace_backward) — confirm
// against its declaration.
static PyObject* eager_api_scale(PyObject* self, PyObject* args,
                                 PyObject* kwargs) {
  EAGER_TRY
  // TODO(jiabin): Sync Tensor and Variable here when we support
  auto& src_tensor =
      reinterpret_cast<EagerTensorObject*>(PyTuple_GET_ITEM(args, 0))
          ->eagertensor;
  float scale_attr = CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 1), 1);
  float bias_attr = CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 2), 2);
  bool bool_attr3 = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  bool bool_attr4 = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  egr::EagerTensor ret =
      egr::scale(src_tensor, scale_attr, bias_attr, bool_attr3, bool_attr4);
  return ToPyObject(ret);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
|
||
// Builds a new EagerTensor that shares (zero-copy) the buffer of
// `numpy_data` via EagerNumpyAllocation, wraps it in a fresh Python
// EagerTensorObject, and marks it as a leaf tensor (accumulation grad node
// + the requested stop_gradient flag).
// NOTE(review): `place` is currently unused — construction is CPU-only
// (see the TODO below); confirm callers expect that.
static PyObject* eager_api_numpy_to_tensor(PyObject* numpy_data,
                                           pten::DataType dtype,
                                           const paddle::platform::Place& place,
                                           bool stop_gradient) {
  // Convert the numpy shape to a DDim.
  std::vector<int64_t> vec_dims;
  auto numpy_shape = pybind11::detail::array_proxy(numpy_data)->dimensions;
  int rank = pybind11::detail::array_proxy(numpy_data)->nd;
  for (int i = 0; i < rank; i++) {
    vec_dims.push_back(static_cast<int64_t>(numpy_shape[i]));
  }
  paddle::framework::DDim dims = paddle::framework::make_ddim(vec_dims);

  // TODO(jiabin): Support GPU later
  auto tensor_meta = pten::DenseTensorMeta(dtype, dims);
  auto holder = std::make_shared<EagerNumpyAllocation>(numpy_data, dtype);
  auto shared_storage =
      pten::make_intrusive<paddle::experimental::SharedStorage>(holder, 0);
  std::shared_ptr<pten::DenseTensor> densetensor(
      new pten::DenseTensor(std::move(shared_storage), std::move(tensor_meta)));

  PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
  if (obj) {
    auto v = reinterpret_cast<EagerTensorObject*>(obj);
    new (&(v->eagertensor)) egr::EagerTensor();
    v->eagertensor.set_impl(densetensor);
    v->eagertensor.set_name(egr::Controller::Instance().GenerateUniqueName());
    // Fix: renamed from `meta` — the original shadowed the DenseTensorMeta
    // local above, which is error-prone even though that object had
    // already been moved from.
    auto autograd_meta = egr::EagerUtils::autograd_meta(&(v->eagertensor));
    autograd_meta->SetStopGradient(stop_gradient);

    // Created tensor will be leaf tensor
    // So we append AccumulationNode to it.
    auto accumulation_node = std::make_shared<egr::GradNodeAccumulation>();
    autograd_meta->SetGradNode(accumulation_node);

    // TODO(jiabin): Shall we increase ref cnt here to make python ref cnt num
    // correctly?
  } else {
    PADDLE_THROW(platform::errors::Fatal(
        "tp_alloc return null, can not new a PyObject."));
  }

  return obj;
}
|
||
// Python binding: to_tensor(data, dtype_str, place, stop_gradient).
// Only numpy ndarray input is supported; anything else raises
// InvalidArgument.
static PyObject* eager_api_to_tensor(PyObject* self, PyObject* args,
                                     PyObject* kwargs) {
  EAGER_TRY
  // TODO(jiabin): Support Kwargs here
  PyObject* data = PyTuple_GET_ITEM(args, 0);
  auto str_dtype = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
  pten::DataType dtype = pten::String2DataType(str_dtype);
  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2);
  bool stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  // TODO(jiabin): Support this when python given name
  // auto str_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 4), 4);

  if (pybind11::detail::npy_api::get().PyArray_Check_(data)) {
    return eager_api_numpy_to_tensor(data, dtype, place, stop_gradient);
  } else {
    // Fix: error message typo ("Eater" -> "Eager"). PADDLE_THROW throws,
    // so the return below is unreachable but kept for safety.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Eager to_tensor only support numpy to tensor."));
    Py_INCREF(Py_None);
    return Py_None;
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
|
||
// Python binding: retain_grad_for_tensor(tensor) -> None.
// Forwards the unpacked tensor to egr::egr_utils_api::RetainGradForTensor.
static PyObject* eager_api_retain_grad_for_tensor(PyObject* self,
                                                  PyObject* args,
                                                  PyObject* kwargs) {
  EAGER_TRY
  egr::egr_utils_api::RetainGradForTensor(
      CastPyArg2EagerTensor(PyTuple_GET_ITEM(args, 0), 0));
  // Py_RETURN_NONE == Py_INCREF(Py_None); return Py_None;
  Py_RETURN_NONE;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
|
||
// Python binding: run_backward(tensors, grad_tensors, <bool>) -> None.
// NOTE(review): the third positional argument is presumably retain_graph —
// confirm against RunBackward's declaration.
static PyObject* eager_api_run_backward(PyObject* self, PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfEagerTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto grad_tensors =
      CastPyArg2VectorOfEagerTensor(PyTuple_GET_ITEM(args, 1), 1);
  bool bool_attr = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  RunBackward(tensors, grad_tensors, bool_attr);
  Py_RETURN_NONE;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
|
||
// Method table for the eager submodule; installed by BindFunctions().
// Every entry accepts (*args, **kwargs) per METH_VARARGS | METH_KEYWORDS.
// The double cast through void(*)(void) silences the function-pointer
// type mismatch between PyCFunctionWithKeywords and PyCFunction.
PyMethodDef variable_functions[] = {
    {"to_tensor", (PyCFunction)(void (*)(void))eager_api_to_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"scale", (PyCFunction)(void (*)(void))eager_api_scale,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_set_expected_place",
     (PyCFunction)(void (*)(void))eager_api_set_expected_place,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"retain_grad_for_tensor",
     (PyCFunction)(void (*)(void))eager_api_retain_grad_for_tensor,
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"run_backward", (PyCFunction)(void (*)(void))eager_api_run_backward,
     METH_VARARGS | METH_KEYWORDS, NULL},
    // Sentinel terminating the table.
    {NULL, NULL, 0, NULL}};
|
||
// Installs the eager-mode module-level functions (variable_functions) onto
// `module`; throws a Fatal error if registration fails.
void BindFunctions(PyObject* module) {
  if (PyModule_AddFunctions(module, variable_functions) < 0) {
    // Fix: error message typo ("erroe" -> "error").
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindFunctions(PyModule_AddFunctions)."));
    return;
  }
}
|
||
} // namespace pybind | ||
} // namespace paddle |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
How about changing these to a new namespace, since we will have several APIs that are the same as the imperative ones?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
我觉得还是这个namespace更合理,所有向python暴露的代码都放在pybind文件夹下,所有代码均用pybind namespace。