
Commit

… dev/bf16_op_3
zhangbo9674 committed Feb 9, 2022
2 parents f907a86 + 87f4a68 commit 62ad45d
Showing 264 changed files with 14,159 additions and 2,934 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -46,6 +46,7 @@ tools/__pycache__
 # This file is automatically generated.
 # TODO(zhiqiang) Move this file to build directory.
 paddle/infrt/dialect/pd_ops.td
+paddle/infrt/dialect/pd_ops_info.h
 .lit_test_times.txt
 paddle/infrt/tests/dialect/Output
 paddle/infrt/tests/lit.cfg.py
3 changes: 0 additions & 3 deletions paddle/fluid/distributed/ps/coordinator/README.md

This file was deleted.

19 changes: 9 additions & 10 deletions paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -25,29 +25,28 @@

 #include "glog/logging.h"

-static void CopyOrAddTensor(egr::EagerTensor* tensor,
-                            const egr::EagerTensor& t) {
-  if (t.Var().IsInitialized()) {
-    const_cast<egr::EagerTensor*>(&t)->SyncToTensor();
-  }
+static void CopyOrAddTensor(paddle::experimental::Tensor* tensor,
+                            const paddle::experimental::Tensor& t) {
   if (!tensor->defined() || !tensor->initialized()) {
     // Simply copy tensor->impl
     *tensor = t;
   } else {
     // Accumulation
-    paddle::imperative::TensorAdd<egr::EagerTensor>(t, tensor);
+    paddle::imperative::TensorAdd<paddle::experimental::Tensor>(t, tensor);
   }
 }

 namespace egr {

 void GradNodeAccumulation::RetainGrad(
-    const std::function<egr::EagerTensor(const egr::EagerTensor&)>& hook) {
+    const std::function<paddle::experimental::Tensor(
+        const paddle::experimental::Tensor&)>& hook) {
   retain_grad_hook_ = hook;
 }

-std::vector<std::vector<egr::EagerTensor>> GradNodeAccumulation::operator()(
-    const std::vector<std::vector<egr::EagerTensor>>& grads) {
+std::vector<std::vector<paddle::experimental::Tensor>> GradNodeAccumulation::
+operator()(
+    const std::vector<std::vector<paddle::experimental::Tensor>>& grads) {
   PADDLE_ENFORCE(grads.size() == 1,
                  paddle::platform::errors::Fatal(
                      "GradNodeAccumulation should take exactly 1 grad tensor"
@@ -60,7 +59,7 @@ std::vector<std::vector<egr::EagerTensor>> GradNodeAccumulation::operator()(
                      grads[0].size(), 0));
   // Apply Gradient Hooks
   if (GradientHooksRegistered()) {
-    std::vector<std::vector<egr::EagerTensor>> hooked_grads =
+    std::vector<std::vector<paddle::experimental::Tensor>> hooked_grads =
         ApplyGradientHooks(grads);
     // TODO(jiabin): It's little weird
     CopyOrAddTensor(&accumulated_grad, hooked_grads[0][0]);
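Note: the heart of the accumulation_node.cc change above is the copy-or-accumulate rule in CopyOrAddTensor — the first gradient that arrives is simply copied into the accumulator, and every later one is added onto it. Below is a minimal, self-contained sketch of that pattern in plain C++; GradSlot, Grad, and the std::optional-based storage are illustrative assumptions, not Paddle types.

#include <iostream>
#include <optional>
#include <vector>

// Illustrative stand-in for a gradient tensor (assumption, not Paddle's type).
using Grad = std::vector<float>;

// Copy-or-accumulate: the first arrival initializes the slot, later arrivals add.
struct GradSlot {
  std::optional<Grad> accumulated;

  void CopyOrAdd(const Grad& g) {
    if (!accumulated.has_value()) {
      accumulated = g;  // first gradient: plain copy
    } else {
      for (size_t i = 0; i < g.size(); ++i) {
        (*accumulated)[i] += g[i];  // later gradients: element-wise accumulation
      }
    }
  }
};

int main() {
  GradSlot slot;
  slot.CopyOrAdd({1.f, 2.f});
  slot.CopyOrAdd({0.5f, 0.5f});
  std::cout << (*slot.accumulated)[0] << ", " << (*slot.accumulated)[1] << "\n";  // 1.5, 2.5
  return 0;
}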
17 changes: 10 additions & 7 deletions paddle/fluid/eager/accumulation/accumulation_node.h
@@ -26,18 +26,21 @@ class GradNodeAccumulation : public GradNodeBase {
   ~GradNodeAccumulation() override = default;

   // Functor: perform backward computations
-  virtual std::vector<std::vector<egr::EagerTensor>> operator()(
-      const std::vector<std::vector<egr::EagerTensor>>& grads) override;
+  virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(
+      const std::vector<std::vector<paddle::experimental::Tensor>>& grads)
+      override;

-  void RetainGrad(
-      const std::function<egr::EagerTensor(const egr::EagerTensor&)>& hook);
+  void RetainGrad(const std::function<paddle::experimental::Tensor(
+                      const paddle::experimental::Tensor&)>& hook);

-  egr::EagerTensor* Grad() { return &accumulated_grad; }
+  paddle::experimental::Tensor* Grad() { return &accumulated_grad; }

  private:
-  egr::EagerTensor accumulated_grad;
+  paddle::experimental::Tensor accumulated_grad;

-  std::function<egr::EagerTensor(const egr::EagerTensor&)> retain_grad_hook_;
+  std::function<paddle::experimental::Tensor(
+      const paddle::experimental::Tensor&)>
+      retain_grad_hook_;
 };

 }  // namespace egr
@@ -77,8 +77,8 @@ static void ScaleDeviceDispatch(const pten::DenseTensor& dense_tensor,
   }
 }

-void ScaleAPI(const egr::EagerTensor& x, float scale, float bias,
-              bool bias_after_scale, egr::EagerTensor* out) {
+void ScaleAPI(const paddle::experimental::Tensor& x, float scale, float bias,
+              bool bias_after_scale, paddle::experimental::Tensor* out) {
   // TODO(jiabin): Support multiple tensor here, Create DenseTensor is not a
   // proper way to Demo it
   // Run Forward Function
@@ -138,14 +138,15 @@ void ScaleAPI(const egr::EagerTensor& x, float scale, float bias,
 }

 void GradNodeScale::SetTensorWrappers_X(
-    const std::vector<egr::EagerTensor>& tensors) {
+    const std::vector<paddle::experimental::Tensor>& tensors) {
   // Does nothing for scale
 }

 void GradNodeScale::SetAttributes_scale(float scale) { scale_ = scale; }

-std::vector<std::vector<egr::EagerTensor>> GradNodeScale::operator()(
-    const std::vector<std::vector<egr::EagerTensor>>& grads) {
+std::vector<std::vector<paddle::experimental::Tensor>> GradNodeScale::
+operator()(
+    const std::vector<std::vector<paddle::experimental::Tensor>>& grads) {
   // 1. Check Output Size
   PADDLE_ENFORCE(
       ((grads.size() == 1) && (grads[0].size() == 1)),
@@ -154,14 +155,14 @@ std::vector<std::vector<egr::EagerTensor>> GradNodeScale::operator()(
           "However received: %d",
           "This indicates an issue with Eager Dygraph Backward logic",
           grads.size()));
-  std::vector<std::vector<egr::EagerTensor>> outs;
+  std::vector<std::vector<paddle::experimental::Tensor>> outs;
   // 2. Create needed out parttern
-  egr::EagerTensor out;
+  paddle::experimental::Tensor out;
   // Apply Gradient Hooks
   if (GradientHooksRegistered()) {
     // TODO(jiabin): Shall we apply hook slot by slot here or accept
     // vector<vector<pten::tensor>> to apply all hooks?
-    std::vector<std::vector<egr::EagerTensor>> hooked_grads =
+    std::vector<std::vector<paddle::experimental::Tensor>> hooked_grads =
         ApplyGradientHooks(grads);
     ScaleAPI(/* slot by slot set */ hooked_grads[0][0], scale_, 0.0 /* bias */,
              true /* bias_after_scale */, &out);
@@ -27,8 +27,8 @@
  */
 namespace egr {

-void ScaleAPI(const egr::EagerTensor& x, float scale, float bias,
-              bool bias_after_scale, egr::EagerTensor* out);
+void ScaleAPI(const paddle::experimental::Tensor& x, float scale, float bias,
+              bool bias_after_scale, paddle::experimental::Tensor* out);

 class GradNodeScale : public GradNodeBase {
  public:
@@ -38,10 +38,12 @@ class GradNodeScale : public GradNodeBase {
   ~GradNodeScale() override = default;

   // Functor: perform backward computations
-  virtual std::vector<std::vector<egr::EagerTensor>> operator()(
-      const std::vector<std::vector<egr::EagerTensor>>& grads) override;
+  virtual std::vector<std::vector<paddle::experimental::Tensor>> operator()(
+      const std::vector<std::vector<paddle::experimental::Tensor>>& grads)
+      override;

-  void SetTensorWrappers_X(const std::vector<egr::EagerTensor>& tensors);
+  void SetTensorWrappers_X(
+      const std::vector<paddle::experimental::Tensor>& tensors);

   void SetAttributes_scale(float scale);

@@ -32,11 +32,12 @@

 namespace egr {

-egr::EagerTensor scale(const egr::EagerTensor& x, float scale, float bias,
-                       bool bias_after_scale, bool trace_backward) {
+paddle::experimental::Tensor scale(const paddle::experimental::Tensor& x,
+                                   float scale, float bias,
+                                   bool bias_after_scale, bool trace_backward) {
   // 1. Run Forward
   // 1.1 Create outputs
-  egr::EagerTensor out;
+  paddle::experimental::Tensor out;
   // 1.2 Need by original op, we assemble ins, outs, attrs here

   // 1.3 Call forward C++ api
@@ -17,7 +17,8 @@
 #include "paddle/fluid/eager/eager_tensor.h"
 namespace egr {

-egr::EagerTensor scale(const egr::EagerTensor& x, float scale, float bias,
-                       bool bias_after_scale, bool trace_backward);
+paddle::experimental::Tensor scale(const paddle::experimental::Tensor& x,
+                                   float scale, float bias,
+                                   bool bias_after_scale, bool trace_backward);

 }  // namespace egr
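Note: the reason GradNodeScale::operator() can reuse ScaleAPI on the incoming gradient (with the saved scale_ and a zero bias, as shown in the hunk above) is that the forward op computes y = scale * x + bias, so by the chain rule dL/dx = scale * dL/dy — the backward of a scale is itself a scale with the same factor. A tiny sketch of that identity with plain floats follows; it is illustrative only, not Paddle code.

#include <cassert>

// Forward: y = scale * x + bias (bias applied after scaling).
float ScaleForward(float x, float scale, float bias) { return scale * x + bias; }

// Backward: dL/dx = scale * dL/dy, i.e. a scale with the same factor and zero bias.
float ScaleBackward(float grad_y, float scale) { return ScaleForward(grad_y, scale, 0.0f); }

int main() {
  const float scale = 2.0f, bias = 3.0f, x = 4.0f;
  float y = ScaleForward(x, scale, bias);                 // 2 * 4 + 3 = 11
  float grad_x = ScaleBackward(/*grad_y=*/1.0f, scale);   // 2, which equals dy/dx
  assert(y == 11.0f && grad_x == scale);
  return 0;
}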
41 changes: 18 additions & 23 deletions paddle/fluid/eager/api/utils/hook_utils.cc
@@ -23,31 +23,34 @@ namespace egr {
 namespace egr_utils_api {

 void RegisterGradientHookForTensor(
-    const egr::EagerTensor& tensor,
-    std::function<egr::EagerTensor(const egr::EagerTensor&)>& hook) {
+    const paddle::experimental::Tensor& tensor,
+    std::function<paddle::experimental::Tensor(
+        const paddle::experimental::Tensor&)>& hook) {
   // Find grad_node and out_rank from AutogradMeta
   std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(tensor);
   auto rank_info = EagerUtils::unsafe_autograd_meta(tensor)->OutRankInfo();

   grad_node->RegisterGradientHook(rank_info.first, rank_info.second, hook);
 }

-void RegisterReduceHookForTensor(const egr::EagerTensor& tensor,
+void RegisterReduceHookForTensor(const paddle::experimental::Tensor& tensor,
                                  const std::function<void(void)>& hook) {
   // Find grad_node and out_rank from AutogradMeta
   std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(tensor);

   grad_node->RegisterReduceHook(hook);
 }

-void RetainGradForTensor(const egr::EagerTensor& tensor) {
+void RetainGradForTensor(const paddle::experimental::Tensor& tensor) {
   // TODO(jiabin): Support More Tensor type here
   AutogradMeta* meta = EagerUtils::unsafe_autograd_meta(tensor);
-  std::weak_ptr<egr::EagerTensor> weak_grad_tensor = meta->WeakGrad();
+  std::weak_ptr<paddle::experimental::Tensor> weak_grad_tensor =
+      meta->WeakGrad();

   // Define Hook
-  std::function<egr::EagerTensor(const egr::EagerTensor&)> hook =
-      [weak_grad_tensor](const egr::EagerTensor& t) {
+  std::function<paddle::experimental::Tensor(
+      const paddle::experimental::Tensor&)>
+      hook = [weak_grad_tensor](const paddle::experimental::Tensor& t) {
         if (!weak_grad_tensor.expired()) {
           auto grad_tensor = weak_grad_tensor.lock();
           if (t.defined()) {
@@ -56,24 +59,16 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) {
             grad_tensor->set_impl(t.impl());
             return *grad_tensor.get();
           } else {
-            VLOG(7) << "Set Var for RetainGrad Hook for tensor: " << t.name();
-            PADDLE_ENFORCE_EQ(
-                t.Var().IsInitialized(), true,
-                paddle::platform::errors::Fatal(
-                    "Detected uninitialized variable, causing segmentation "
-                    "fault "
-                    "inside the hook."
-                    "Variable %s has to be initialized while we need to set it."
-                    "please check tensor initialization status.",
-                    t.name()));
-            grad_tensor->MutableVar()
-                ->GetMutable<paddle::framework::LoDTensor>()
-                ->ShareDataWith(t.Var().Get<paddle::framework::LoDTensor>());
-            return *grad_tensor.get();
+            PADDLE_THROW(paddle::platform::errors::Fatal(
+                "Detected uninitialized variable, causing segmentation "
+                "fault "
+                "inside the hook."
+                "Tensor has to be initialized while we need to set it."
+                "please check tensor initialization status."));
           }
         } else {
-          VLOG(7) << "Retain NULL EagerTensor in Grad Hook";
-          return EagerTensor();
+          VLOG(7) << "Retain NULL paddle::experimental::Tensor in Grad Hook";
+          return paddle::experimental::Tensor();
         }
       };

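Note: RetainGradForTensor above captures the gradient slot through a std::weak_ptr, so the registered hook never extends the gradient's lifetime on its own; when the hook fires it locks the weak pointer and only writes through it if the owner is still alive. A minimal sketch of that capture pattern using plain std::shared_ptr and std::function follows; the float "slot" and the hook signature are illustrative assumptions, not Paddle's hook machinery.

#include <functional>
#include <iostream>
#include <memory>

int main() {
  auto grad = std::make_shared<float>(0.0f);
  std::weak_ptr<float> weak_grad = grad;  // the hook holds only a weak reference

  // The hook writes the incoming value into the grad slot if it still exists.
  std::function<void(float)> hook = [weak_grad](float incoming) {
    if (auto locked = weak_grad.lock()) {
      *locked = incoming;  // owner alive: retain the gradient
    } else {
      std::cout << "grad slot already released, skipping\n";
    }
  };

  hook(1.5f);
  std::cout << *grad << "\n";  // prints 1.5

  grad.reset();  // release the owner
  hook(2.5f);    // the hook safely becomes a no-op
  return 0;
}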
9 changes: 5 additions & 4 deletions paddle/fluid/eager/api/utils/hook_utils.h
@@ -21,12 +21,13 @@ namespace egr {
 namespace egr_utils_api {

 void RegisterGradientHookForTensor(
-    const egr::EagerTensor& tensor,
-    std::function<egr::EagerTensor(const egr::EagerTensor&)>& hook);
+    const paddle::experimental::Tensor& tensor,
+    std::function<paddle::experimental::Tensor(
+        const paddle::experimental::Tensor&)>& hook);

-void RegisterReduceHookForTensor(const egr::EagerTensor& tensor,
+void RegisterReduceHookForTensor(const paddle::experimental::Tensor& tensor,
                                  const std::function<void(void)>& hook);
-void RetainGradForTensor(const egr::EagerTensor& tensor);
+void RetainGradForTensor(const paddle::experimental::Tensor& tensor);

 }  // namespace egr_utils_api
 }  // namespace egr
15 changes: 6 additions & 9 deletions paddle/fluid/eager/api/utils/tensor_utils.cc
@@ -28,7 +28,7 @@
 namespace egr {
 namespace egr_utils_api {

-bool IsLeafTensor(const egr::EagerTensor& target) {
+bool IsLeafTensor(const paddle::experimental::Tensor& target) {
   std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(target);
   if (std::dynamic_pointer_cast<GradNodeAccumulation>(grad_node)) {
     return true;
@@ -37,17 +37,14 @@ bool IsLeafTensor(const egr::EagerTensor& target) {
   return false;
 }

-egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
-                                       const paddle::platform::Place& place,
-                                       const pten::DataType& dtype,
-                                       const pten::DataLayout& layout,
-                                       float value, bool is_leaf) {
-  paddle::experimental::Tensor tensor = paddle::experimental::full(
+paddle::experimental::Tensor CreateTensorWithValue(
+    const pten::DDim& ddim, const paddle::platform::Place& place,
+    const pten::DataType& dtype, const pten::DataLayout& layout, float value,
+    bool is_leaf) {
+  paddle::experimental::Tensor out = paddle::experimental::full(
       paddle::framework::vectorize(ddim), paddle::experimental::Scalar(value),
       dtype, pten::TransToPtenBackend(place), layout);

-  egr::EagerTensor out = egr::EagerTensor();
-  out.set_tensor(std::make_shared<paddle::experimental::Tensor>(tensor));
   auto meta = EagerUtils::autograd_meta(&out);
   if (is_leaf) {
     auto accumulation_node = std::make_shared<GradNodeAccumulation>();
11 changes: 5 additions & 6 deletions paddle/fluid/eager/api/utils/tensor_utils.h
@@ -22,13 +22,12 @@ namespace egr_utils_api {

 // If and only if the tensor holds an AccumulationNode
 // Then it's treated as a leaf tensor
-bool IsLeafTensor(const egr::EagerTensor& target);
+bool IsLeafTensor(const paddle::experimental::Tensor& target);

-egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
-                                       const paddle::platform::Place& place,
-                                       const pten::DataType& dtype,
-                                       const pten::DataLayout& layout,
-                                       float value, bool is_leaf = true);
+paddle::experimental::Tensor CreateTensorWithValue(
+    const pten::DDim& ddim, const paddle::platform::Place& place,
+    const pten::DataType& dtype, const pten::DataLayout& layout, float value,
+    bool is_leaf = true);

 }  // namespace egr_utils_api
 }  // namespace egr
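Note: IsLeafTensor above encodes "leaf tensor" purely as a type check — a tensor is a leaf if and only if its grad node is a GradNodeAccumulation, which is exactly the node CreateTensorWithValue attaches when is_leaf is true. A small self-contained sketch of that dynamic_pointer_cast-based check follows; the stand-in node classes are illustrative assumptions, not Paddle's real hierarchy.

#include <iostream>
#include <memory>

// Illustrative stand-ins for the grad-node hierarchy (not Paddle's classes).
struct GradNodeBase { virtual ~GradNodeBase() = default; };
struct GradNodeAccumulation : GradNodeBase {};  // attached only to leaf tensors
struct GradNodeScale : GradNodeBase {};         // an ordinary op node

// Mirrors the IsLeafTensor logic: leaf <=> the node is an accumulation node.
bool IsLeaf(const std::shared_ptr<GradNodeBase>& node) {
  return std::dynamic_pointer_cast<GradNodeAccumulation>(node) != nullptr;
}

int main() {
  std::shared_ptr<GradNodeBase> leaf_node = std::make_shared<GradNodeAccumulation>();
  std::shared_ptr<GradNodeBase> op_node = std::make_shared<GradNodeScale>();
  std::cout << IsLeaf(leaf_node) << " " << IsLeaf(op_node) << "\n";  // prints: 1 0
  return 0;
}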

1 comment on commit 62ad45d

@paddle-bot-old


Congratulations! Your pull request passed all required CI. You can ask reviewer(s) to approve and merge. 🎉
