Fixed python-c api
jim19930609 committed Nov 19, 2021
1 parent 8886d36 commit 8001ef3
Showing 4 changed files with 28 additions and 1 deletion.
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/CMakeLists.txt
@@ -208,7 +208,7 @@ if(WITH_PYTHON)

  cc_library(paddle_eager
      SRCS eager.cc eager_functions.cc eager_method.cc eager_properties.cc eager_utils.cc
-     DEPS eager_api autograd_meta backward grad_node_info pten)
+     DEPS eager_api accumulation_node autograd_meta backward grad_node_info pten)
  list(APPEND PYBIND_DEPS paddle_eager)

  cc_library(paddle_pybind SHARED
8 changes: 8 additions & 0 deletions paddle/fluid/pybind/eager_functions.cc
@@ -21,6 +21,7 @@ limitations under the License. */
#include <string>
#include <vector>

+#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/backward.h"
@@ -30,6 +31,7 @@ limitations under the License. */
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/pten/api/lib/utils/allocator.h"
#include "paddle/pten/api/lib/utils/storage.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/common/data_type.h"
@@ -157,6 +159,12 @@ static inline PyObject* eager_api_numpy_to_tensor(
    v->eagertensor.set_name(egr::Controller::Instance().GenerateUniqueName());
    auto meta = egr::EagerUtils::autograd_meta(&(v->eagertensor));
    meta->SetStopGradient(stop_gradient);
+
+    // The created tensor will be a leaf tensor,
+    // so we append an AccumulationNode to it.
+    auto accumulation_node = std::make_shared<egr::GradNodeAccumulation>();
+    meta->SetGradNode(accumulation_node);
+
    // TODO(jiabin): Shall we increase the ref count here to keep the Python
    // ref count correct?
  } else {
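A tensor created straight from numpy data has no producing operation, so it is a leaf of the autograd graph, and the GradNodeAccumulation attached above gives backward() a terminal node in which to accumulate its gradient. Below is a minimal, self-contained sketch of that pattern using simplified stand-ins for the egr:: types (not Paddle's real definitions):

#include <memory>
#include <utility>

// Simplified stand-in for egr::GradNodeBase.
struct GradNode {
  virtual ~GradNode() = default;
};

// Simplified stand-in for egr::GradNodeAccumulation: the terminal backward
// node that accumulates incoming gradients into a leaf tensor's grad buffer.
struct GradNodeAccumulation : GradNode {};

// Simplified stand-in for egr::AutogradMeta.
struct AutogradMeta {
  bool stop_gradient = false;
  std::shared_ptr<GradNode> grad_node;
  void SetStopGradient(bool v) { stop_gradient = v; }
  void SetGradNode(std::shared_ptr<GradNode> node) {
    grad_node = std::move(node);
  }
};

// A leaf tensor has no producing op, so backward() needs an accumulation
// node as the place to deposit its gradient.
void MarkAsLeaf(AutogradMeta* meta, bool stop_gradient) {
  meta->SetStopGradient(stop_gradient);
  meta->SetGradNode(std::make_shared<GradNodeAccumulation>());
}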
11 changes: 11 additions & 0 deletions paddle/fluid/pybind/eager_method.cc
@@ -77,6 +77,17 @@ static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
        place, reinterpret_cast<void*>(
                   (reinterpret_cast<PyArrayObject_fields*>(array))->data),
        place, dense_tensor->data(), sizeof_dtype * numel);
+#if defined(PADDLE_WITH_CUDA)
+  } else if (self->eagertensor.is_cuda()) {
+    auto dense_tensor =
+        std::dynamic_pointer_cast<pten::DenseTensor>(self->eagertensor.impl());
+
+    paddle::platform::GpuMemcpySync(
+        (reinterpret_cast<PyArrayObject_fields*>(array))->data,
+        dense_tensor->data(),
+        pten::DataTypeSize(dense_tensor->dtype()) * dense_tensor->numel(),
+        cudaMemcpyDeviceToHost);
+#endif
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Tensor.numpy() only support cpu tensor."));
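The new branch copies the tensor's device buffer directly into the numpy array's host buffer. A minimal sketch of the same device-to-host copy against the raw CUDA runtime API (assuming a CUDA toolchain; GpuMemcpySync is, to a first approximation, cudaMemcpy plus Paddle's error checking):

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main() {
  const size_t numel = 4;
  std::vector<float> host(numel, 0.0f);  // plays the role of the numpy buffer

  float* device = nullptr;
  cudaMalloc(&device, numel * sizeof(float));
  cudaMemset(device, 0, numel * sizeof(float));

  // Synchronous device-to-host copy, the same direction the binding uses:
  // cudaMemcpyDeviceToHost blocks until the host buffer is filled.
  cudaMemcpy(host.data(), device, numel * sizeof(float),
             cudaMemcpyDeviceToHost);

  cudaFree(device);
  printf("host[0] = %f\n", host[0]);
  return 0;
}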
8 changes: 8 additions & 0 deletions paddle/fluid/pybind/eager_properties.cc
@@ -65,6 +65,12 @@ PyObject* eager_tensor_properties_get_stop_gradient(EagerTensorObject* self,
  return ToPyObject(meta->StopGradient());
}

+PyObject* eager_tensor_properties_get_grad(EagerTensorObject* self,
+                                           void* closure) {
+  auto meta = egr::EagerUtils::unsafe_autograd_meta(self->eagertensor);
+  return ToPyObject(meta->Grad());
+}
+
int eager_tensor_properties_set_stop_gradient(EagerTensorObject* self,
                                              PyObject* value, void* closure) {
  auto meta = egr::EagerUtils::unsafe_autograd_meta(self->eagertensor);
@@ -119,6 +125,8 @@ PyObject* eager_tensor_properties_get_dtype(EagerTensorObject* self,
}

struct PyGetSetDef variable_properties[] = {
+    {"grad", (getter)eager_tensor_properties_get_grad, nullptr, nullptr,
+     nullptr},
    {"name", (getter)eager_tensor_properties_get_name,
     (setter)eager_tensor_properties_set_name, nullptr, nullptr},
    {"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient,
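The new table entry exposes grad as a read-only Python attribute: the setter slot is nullptr, so assignment from Python raises AttributeError. A compilable sketch of that standard CPython PyGetSetDef pattern on a toy type (hypothetical demo code, not Paddle's):

#include <Python.h>

// Toy object with one field, standing in for EagerTensorObject.
typedef struct {
  PyObject_HEAD
  double value;
} DemoObject;

// Getter: called on attribute read, e.g. obj.value in Python.
static PyObject* demo_get_value(DemoObject* self, void* closure) {
  return PyFloat_FromDouble(self->value);
}

// A nullptr in the setter slot makes the attribute read-only, exactly how
// "grad" is registered above.
static PyGetSetDef demo_getset[] = {
    {"value", (getter)demo_get_value, /*setter=*/nullptr,
     /*doc=*/nullptr, /*closure=*/nullptr},
    {nullptr, nullptr, nullptr, nullptr, nullptr}  // sentinel terminates table
};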
