[Move selected_rows PR #3] Change the relationship of [include/Cmake]. (PaddlePaddle#39128)

* Added selected_rows and rw_lock to pten

* Renamed the unit test target to fix CI

* Removed class SelectedRows in Fluid, changed the include/CMake relationship, and switched Fluid to pten::SelectedRows

* Removed rw_lock.h and rw_lock_test.cc in fluid

* Use pten::RWLock and pten::AutoRDLock, fix CI

* Use pten::SelectedRows

* Use pten::SelectedRows

* Fix to pass NPU CI

* Use pten::SelectedRows to pass NPU CI

* To fix NPU CI

* To fix NPU CI again
veyron95 authored Jan 25, 2022
1 parent 3825b40 commit 2bafd33
Showing 146 changed files with 685 additions and 1,181 deletions.
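The file-by-file hunks below repeat one mechanical pattern: every access to a variable's sparse-row payload swaps the fluid type for the pten type while the surrounding Variable API stays unchanged. A minimal sketch of that call-site change, assuming the moved class is reachable at paddle/pten/core/selected_rows.h (the IsType/Get accessors and height() are taken from the hunks; the helper function itself is hypothetical):

// Illustrative sketch only: the helper is made up, but the accessor calls
// mirror the ones used throughout this commit's hunks.
#include "paddle/fluid/framework/variable.h"
#include "paddle/pten/core/selected_rows.h"  // assumed pten location of the moved class

int64_t SelectedRowsHeight(const paddle::framework::Variable& var) {
  // Before this PR the check was var.IsType<paddle::framework::SelectedRows>().
  if (var.IsType<pten::SelectedRows>()) {
    return var.Get<pten::SelectedRows>().height();
  }
  return -1;  // caller treats any non-SelectedRows variable as "no height"
}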
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/fleet.h
@@ -49,7 +49,7 @@ class PSCore;

using framework::LoDTensor;
using framework::Scope;
-using framework::SelectedRows;
+using pten::SelectedRows;
using framework::Variable;

using RpcCtxMap = std::unordered_map<std::string, CommContext>;
6 changes: 3 additions & 3 deletions paddle/fluid/distributed/service/brpc_utils.cc
@@ -76,7 +76,7 @@ void SerializeToMultiVarMsgAndIOBuf(

if (var->IsType<framework::LoDTensor>()) {
SerializeLodTensor(var, ctx, send_var_msg, &temp_iobuf);
-} else if (var->IsType<framework::SelectedRows>()) {
+} else if (var->IsType<pten::SelectedRows>()) {
SerializeSelectedRows(var, ctx, send_var_msg, &temp_iobuf);
}
iobuf->append(temp_iobuf);
@@ -127,7 +127,7 @@ void SerializeLodTensor(framework::Variable* var,
void SerializeSelectedRows(framework::Variable* var,
const platform::DeviceContext& ctx, VarMsg* var_msg,
butil::IOBuf* iobuf) {
-framework::SelectedRows* slr = var->GetMutable<framework::SelectedRows>();
+pten::SelectedRows* slr = var->GetMutable<pten::SelectedRows>();
auto* tensor = slr->mutable_value();
auto* rows = slr->mutable_rows();

@@ -255,7 +255,7 @@ void DeserializeSelectedRows(
butil::IOBufBytesIterator& io_buffer_itr, // NOLINT
const platform::DeviceContext& ctx) {
const auto place = ctx.GetPlace();
-auto* slr = var->GetMutable<framework::SelectedRows>();
+auto* slr = var->GetMutable<pten::SelectedRows>();
framework::Tensor* tensor = slr->mutable_value();
slr->set_height(msg.slr_height());
std::vector<int64_t> tmp_rows(msg.dims()[0]);
10 changes: 5 additions & 5 deletions paddle/fluid/distributed/service/communicator.cc
@@ -28,7 +28,7 @@ namespace paddle {
namespace distributed {

using framework::LoDTensor;
-using framework::SelectedRows;
+using pten::SelectedRows;

const uint32_t MAX_FEASIGN_NUM = 1024 * 100 * 100;

@@ -293,7 +293,7 @@ void Communicator::RpcSendSparse(const std::string &var_name, int table_id,
std::vector<float *> push_g_vec;

auto *send_var = scope.FindVar(var_name);
-auto *tensor = send_var->GetMutable<SelectedRows>();
+auto *tensor = send_var->GetMutable<pten::SelectedRows>();
auto dim = tensor->value().dims()[1];
std::transform(tensor->rows().begin(), tensor->rows().end(),
std::back_inserter(sparse_push_keys),
@@ -1012,10 +1012,10 @@ void GeoCommunicator::Send(const std::vector<std::string> &var_names,

auto *var = scope.FindVar(table_name);

-PADDLE_ENFORCE_EQ(var->IsType<framework::SelectedRows>(), true,
+PADDLE_ENFORCE_EQ(var->IsType<pten::SelectedRows>(), true,
platform::errors::InvalidArgument(
"Only need to send Sparse Grad in Geo mode."));
-auto &rows = var->Get<framework::SelectedRows>().rows();
+auto &rows = var->Get<pten::SelectedRows>().rows();

// insert ids which has not been record
for (size_t j = 0; j < rows.size(); j++) {
@@ -1290,7 +1290,7 @@ void GeoCommunicator::SendSparse(const std::string &varname,
auto cpu_ctx = paddle::platform::CPUDeviceContext();

auto *var_delta = delta_scope_->Var(varname);
-auto *t_delta = var_delta->GetMutable<framework::SelectedRows>();
+auto *t_delta = var_delta->GetMutable<pten::SelectedRows>();
auto *var_t_value = t_delta->mutable_value();
var_t_value->Resize({static_cast<int64_t>(sparse_ids.size()), dims1});
auto *t_value = var_t_value->mutable_data<float>(cpu_ctx.GetPlace());
10 changes: 5 additions & 5 deletions paddle/fluid/distributed/service/communicator.h
@@ -193,15 +193,15 @@ inline void MergeVars(const std::string &var_name,
result.device(*cpu_ctx.eigen_device()) =
result / static_cast<T>(vars.size());
}
-} else if (var0->IsType<framework::SelectedRows>()) {
-auto &slr0 = var0->Get<framework::SelectedRows>();
-auto *out_slr = out_var->GetMutable<framework::SelectedRows>();
+} else if (var0->IsType<pten::SelectedRows>()) {
+auto &slr0 = var0->Get<pten::SelectedRows>();
+auto *out_slr = out_var->GetMutable<pten::SelectedRows>();
out_slr->mutable_rows()->clear();
out_slr->mutable_value()->mutable_data<T>({{}}, cpu_place);
-std::vector<const paddle::framework::SelectedRows *> inputs;
+std::vector<const pten::SelectedRows *> inputs;
inputs.reserve(vars.size());
for (auto &var : vars) {
-inputs.push_back(&var->Get<framework::SelectedRows>());
+inputs.push_back(&var->Get<pten::SelectedRows>());
}
auto dev_ctx = paddle::platform::CPUDeviceContext();
if (merge_add) {
4 changes: 3 additions & 1 deletion paddle/fluid/distributed/table/common_graph_table.h
@@ -39,8 +39,10 @@
#include "paddle/fluid/distributed/table/accessor.h"
#include "paddle/fluid/distributed/table/common_table.h"
#include "paddle/fluid/distributed/table/graph/graph_node.h"
#include "paddle/fluid/framework/rw_lock.h"
#include "paddle/fluid/string/string_helper.h"

#include "paddle/pten/core/utils/rw_lock.h"

namespace paddle {
namespace distributed {
class GraphShard {
6 changes: 3 additions & 3 deletions paddle/fluid/distributed/table/common_sparse_table.h
@@ -29,8 +29,8 @@
#include "paddle/fluid/distributed/table/depends/initializers.h"
#include "paddle/fluid/distributed/table/depends/large_scale_kv.h"
#include "paddle/fluid/distributed/table/depends/sparse.h"
#include "paddle/fluid/framework/rw_lock.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/pten/core/utils/rw_lock.h"

#define PSERVER_SAVE_SUFFIX ".shard"

@@ -110,7 +110,7 @@ struct Meta {

class CommonSparseTable : public SparseTable {
public:
-CommonSparseTable() { rwlock_.reset(new framework::RWLock); }
+CommonSparseTable() { rwlock_.reset(new pten::RWLock); }
virtual ~CommonSparseTable() {}

// unused method begin
@@ -193,7 +193,7 @@ class CommonSparseTable : public SparseTable {
std::shared_ptr<SparseOptimizer> optimizer_;
std::vector<std::shared_ptr<ValueBlock>> shard_values_;
std::unordered_map<uint64_t, ReservoirValue<float>> pull_reservoir_;
-std::unique_ptr<framework::RWLock> rwlock_{nullptr};
+std::unique_ptr<pten::RWLock> rwlock_{nullptr};
};

} // namespace distributed
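The CommonSparseTable hunks above retarget the table's shard lock to the relocated pten header. A short usage sketch, under the assumption that pten::RWLock keeps the interface it had in fluid's rw_lock.h (RDLock/WRLock/UNLock plus the AutoRDLock/AutoWRLock guards named in the commit message); the wrapper class here is hypothetical:

// Hypothetical wrapper mirroring how CommonSparseTable holds and uses its lock.
// Assumes the moved class keeps fluid's reader/writer guard interface.
#include <memory>
#include "paddle/pten/core/utils/rw_lock.h"

class ShardAccessExample {
 public:
  ShardAccessExample() { rwlock_.reset(new pten::RWLock); }

  void Update() {
    pten::AutoWRLock guard(rwlock_.get());  // exclusive access while writing shard state
    // ... mutate shard values ...
  }

  void Lookup() const {
    pten::AutoRDLock guard(rwlock_.get());  // shared access while reading
    // ... read shard values ...
  }

 private:
  std::unique_ptr<pten::RWLock> rwlock_{nullptr};
};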
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/table/depends/large_scale_kv.h
@@ -32,7 +32,6 @@
#include "paddle/fluid/distributed/thirdparty/round_robin.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/rw_lock.h"
#include "paddle/fluid/framework/selected_rows_utils.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/threadpool.h"
@@ -43,6 +42,7 @@
#include "paddle/fluid/string/printf.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/pten/backends/dynload/port.h"
#include "paddle/pten/core/utils/rw_lock.h"

namespace paddle {
namespace distributed {
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/table/sparse_geo_table.h
@@ -31,8 +31,8 @@
#include "paddle/fluid/distributed/table/depends/initializers.h"
#include "paddle/fluid/distributed/table/depends/large_scale_kv.h"
#include "paddle/fluid/distributed/table/depends/sparse.h"
#include "paddle/fluid/framework/rw_lock.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/pten/core/utils/rw_lock.h"

namespace paddle {
namespace distributed {
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/test/brpc_utils_test.cc
@@ -56,7 +56,7 @@ void CreateVarsOnScope(framework::Scope* scope, platform::Place* place,

// var 3
framework::Variable* var3 = scope->Var("x3");
-auto* slr = var3->GetMutable<framework::SelectedRows>();
+auto* slr = var3->GetMutable<pten::SelectedRows>();
slr->set_height(564);
auto* tensor3 = slr->mutable_value();
auto* rows = slr->mutable_rows();
@@ -111,7 +111,7 @@ void RunMultiVarMsg(platform::Place place) {

// check var3
framework::Variable* var3 = scope_recv.FindVar("x3");
-auto* slr = var3->GetMutable<framework::SelectedRows>();
+auto* slr = var3->GetMutable<pten::SelectedRows>();
EXPECT_EQ(slr->rows().size(), 564);
for (int i = 0; i < 564; ++i) {
EXPECT_EQ(slr->rows()[i], i);
13 changes: 6 additions & 7 deletions paddle/fluid/eager/legacy/infer_shape_context.h
@@ -197,9 +197,8 @@ class EagerInferShapeContext : public paddle::framework::InferShapeContext {
out_var->GetMutable<paddle::framework::LoDTensor>();
out_lod_tensor->Resize(in_lod_tensor.dims());
} else {
-auto& in_sele_rows = in_var->Get<paddle::framework::SelectedRows>();
-auto out_sele_rows =
-out_var->GetMutable<paddle::framework::SelectedRows>();
+auto& in_sele_rows = in_var->Get<pten::SelectedRows>();
+auto out_sele_rows = out_var->GetMutable<pten::SelectedRows>();
out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
out_sele_rows->set_rows(in_sele_rows.rows());
out_sele_rows->set_height(in_sele_rows.height());
@@ -368,8 +367,8 @@ class EagerInferShapeContext : public paddle::framework::InferShapeContext {
"Input variable should not be null"));
if (var->IsType<paddle::framework::LoDTensor>()) {
return var->Get<paddle::framework::LoDTensor>().dims();
-} else if (var->IsType<paddle::framework::SelectedRows>()) {
-return var->Get<paddle::framework::SelectedRows>().GetCompleteDims();
+} else if (var->IsType<pten::SelectedRows>()) {
+return var->Get<pten::SelectedRows>().GetCompleteDims();
} else {
PADDLE_THROW(paddle::platform::errors::PermissionDenied(
"Only LoDTensor/SelectedRows support 'GetDim', but Variables "
@@ -385,8 +384,8 @@
void SetDim(paddle::framework::Variable* var, const DDim& dim) {
if (var->IsType<paddle::framework::LoDTensor>()) {
var->GetMutable<paddle::framework::LoDTensor>()->Resize(dim);
-} else if (var->IsType<paddle::framework::SelectedRows>()) {
-var->GetMutable<paddle::framework::SelectedRows>()->set_height(dim[0]);
+} else if (var->IsType<pten::SelectedRows>()) {
+var->GetMutable<pten::SelectedRows>()->set_height(dim[0]);
} else {
PADDLE_THROW(paddle::platform::errors::PermissionDenied(
"Variable type_id %s, expect LoDTensor/SelectedRows."));
4 changes: 2 additions & 2 deletions paddle/fluid/eager/legacy/prepared_operator.cc
@@ -32,8 +32,8 @@ const paddle::framework::Tensor* GetTensorFromVar(
const paddle::framework::Variable& var) {
if (var.IsType<paddle::framework::LoDTensor>()) {
return &(var.Get<paddle::framework::LoDTensor>());
-} else if (var.IsType<paddle::framework::SelectedRows>()) {
-return &(var.Get<paddle::framework::SelectedRows>().value());
+} else if (var.IsType<pten::SelectedRows>()) {
+return &(var.Get<pten::SelectedRows>().value());
} else {
return nullptr;
}
16 changes: 8 additions & 8 deletions paddle/fluid/eager/legacy/tensor_helper.cc
@@ -32,7 +32,7 @@ void InitializeVariable(paddle::framework::Variable *var,
if (var_type == paddle::framework::proto::VarType::LOD_TENSOR) {
var->GetMutable<paddle::framework::LoDTensor>();
} else if (var_type == paddle::framework::proto::VarType::SELECTED_ROWS) {
-var->GetMutable<paddle::framework::SelectedRows>();
+var->GetMutable<pten::SelectedRows>();
} else if (var_type == paddle::framework::proto::VarType::FEED_MINIBATCH) {
var->GetMutable<paddle::framework::FeedList>();
} else if (var_type == paddle::framework::proto::VarType::FETCH_LIST) {
@@ -72,9 +72,9 @@ void CopyVariable(const paddle::framework::Variable &src_var,
auto &src_tensor = src_var.Get<paddle::framework::LoDTensor>();
tmp_grad_tensor->set_lod(src_tensor.lod());
paddle::framework::TensorCopy(src_tensor, cpu_place, tmp_grad_tensor);
-} else if (src_var.IsType<paddle::framework::SelectedRows>()) {
-auto &src_slr = src_var.Get<paddle::framework::SelectedRows>();
-auto *tmp_grad_slr = dst_var->GetMutable<paddle::framework::SelectedRows>();
+} else if (src_var.IsType<pten::SelectedRows>()) {
+auto &src_slr = src_var.Get<pten::SelectedRows>();
+auto *tmp_grad_slr = dst_var->GetMutable<pten::SelectedRows>();
tmp_grad_slr->set_rows(src_slr.rows());
tmp_grad_slr->set_height(src_slr.height());
auto &src_t = src_slr.value();
@@ -89,8 +89,8 @@ paddle::framework::proto::VarType::Type GetDtypeFromVar(
const paddle::framework::Variable &var) {
if (var.IsType<paddle::framework::LoDTensor>()) {
return var.Get<paddle::framework::LoDTensor>().type();
-} else if (var.IsType<paddle::framework::SelectedRows>()) {
-return var.Get<paddle::framework::SelectedRows>().value().type();
+} else if (var.IsType<pten::SelectedRows>()) {
+return var.Get<pten::SelectedRows>().value().type();
} else {
PADDLE_THROW(paddle::platform::errors::InvalidArgument(
"Variable type is %s, expect LoDTensor or SelectedRows.",
@@ -101,8 +101,8 @@ const paddle::platform::Place &GetPlaceFromVar(
const paddle::framework::Variable &var) {
if (var.IsType<paddle::framework::LoDTensor>()) {
return var.Get<paddle::framework::LoDTensor>().place();
-} else if (var.IsType<paddle::framework::SelectedRows>()) {
-return var.Get<paddle::framework::SelectedRows>().place();
+} else if (var.IsType<pten::SelectedRows>()) {
+return var.Get<pten::SelectedRows>().place();
} else {
PADDLE_THROW(paddle::platform::errors::InvalidArgument(
"Variable type is %s, expect LoDTensor or SelectedRows.",
6 changes: 1 addition & 5 deletions paddle/fluid/framework/CMakeLists.txt
@@ -383,7 +383,7 @@ cc_library(prune SRCS prune.cc DEPS framework_proto boost)
cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context)
cc_test(var_type_inference_test SRCS var_type_inference_test.cc DEPS op_registry
proto_desc)
-cc_library(selected_rows_utils SRCS selected_rows_utils.cc DEPS tensor)
+cc_library(selected_rows_utils SRCS selected_rows_utils.cc DEPS selected_rows)
cc_test(selected_rows_utils_test SRCS selected_rows_utils_test.cc DEPS selected_rows_utils)

cc_test(op_kernel_type_test SRCS op_kernel_type_test.cc DEPS place device_context framework_proto op_kernel_type)
@@ -393,10 +393,6 @@ cc_test(tuple_test SRCS tuple_test.cc )

cc_test(inlined_vector_test SRCS inlined_vector_test.cc)

-if (NOT WIN32)
-cc_test(rw_lock_test SRCS rw_lock_test.cc)
-endif (NOT WIN32)

cc_library(dlpack_tensor SRCS dlpack_tensor.cc DEPS tensor dlpack)
cc_test(dlpack_tensor_test SRCS dlpack_tensor_test.cc DEPS dlpack_tensor glog)

6 changes: 3 additions & 3 deletions paddle/fluid/framework/data_transform.cc
@@ -120,9 +120,9 @@ void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
tran_lod_tensor->set_format(in_lod_tensor.format());
#endif
tran_lod_tensor->ShareDataWith(tensor);
-} else if (in_var.IsType<SelectedRows>()) {
-auto &in_selected_rows = in_var.Get<SelectedRows>();
-auto *trans_selected_rows = out_var->GetMutable<SelectedRows>();
+} else if (in_var.IsType<pten::SelectedRows>()) {
+auto &in_selected_rows = in_var.Get<pten::SelectedRows>();
+auto *trans_selected_rows = out_var->GetMutable<pten::SelectedRows>();
trans_selected_rows->set_height(in_selected_rows.height());
trans_selected_rows->set_rows(in_selected_rows.rows());
trans_selected_rows->mutable_value()->ShareDataWith(tensor);
4 changes: 2 additions & 2 deletions paddle/fluid/framework/details/broadcast_op_handle_test.h
@@ -237,7 +237,7 @@ struct TestBroadcastOpHandle {
PADDLE_ENFORCE_NOT_NULL(
var, platform::errors::NotFound("Variable %s is not found in scope.",
varname));
-auto selected_rows = var->GetMutable<f::SelectedRows>();
+auto selected_rows = var->GetMutable<pten::SelectedRows>();
auto value = selected_rows->mutable_value();
value->mutable_data<float>(kDims, place_list_[input_scope_idx]);
selected_rows->set_height(height);
@@ -256,7 +256,7 @@ struct TestBroadcastOpHandle {
PADDLE_ENFORCE_NOT_NULL(
var, platform::errors::NotFound("Variable %s is not found in scope.",
varname));
-auto& selected_rows = var->Get<f::SelectedRows>();
+auto& selected_rows = var->Get<pten::SelectedRows>();
auto rt = selected_rows.value();
PADDLE_ENFORCE_EQ(selected_rows.height(), height,
platform::errors::InvalidArgument(
7 changes: 4 additions & 3 deletions paddle/fluid/framework/details/eager_deletion_op_handle.cc
@@ -129,9 +129,10 @@ void EagerDeletionOpHandle::RunImpl() {

if (var->IsType<LoDTensor>()) {
garbages.emplace_back(var->GetMutable<LoDTensor>()->MoveMemoryHolder());
-} else if (var->IsType<SelectedRows>()) {
-garbages.emplace_back(
-var->GetMutable<SelectedRows>()->mutable_value()->MoveMemoryHolder());
+} else if (var->IsType<pten::SelectedRows>()) {
+garbages.emplace_back(var->GetMutable<pten::SelectedRows>()
+->mutable_value()
+->MoveMemoryHolder());
} else if (var->IsType<LoDTensorArray>()) {
auto *tensor_arr = var->GetMutable<LoDTensorArray>();
for (auto &t : *tensor_arr) {
8 changes: 4 additions & 4 deletions paddle/fluid/framework/details/gather_op_handle.cc
@@ -64,14 +64,14 @@ void GatherOpHandle::RunImpl() {
platform::errors::NotFound("The variable '%s' is not found in the scope.",
in_0_handle->name()));

-PADDLE_ENFORCE_EQ(pre_in_var->IsType<framework::SelectedRows>(), true,
+PADDLE_ENFORCE_EQ(pre_in_var->IsType<pten::SelectedRows>(), true,
platform::errors::Unimplemented(
"Currently, gather_op only supports SelectedRows."));

// Wait input done, this Wait is asynchronous operation
WaitInputVarGenerated();

-auto &pre_in_value = pre_in_var->Get<framework::SelectedRows>();
+auto &pre_in_value = pre_in_var->Get<pten::SelectedRows>();
std::vector<int64_t> out_rows;
std::vector<Tensor> in_tensors;

@@ -85,7 +85,7 @@
"The variable '%s' is not found in the scope.", in_handle->name()));
VariableVisitor::EnforceShapeAndDTypeEQ(*in_var, *pre_in_var);

-auto &in_sr_value = in_var->Get<framework::SelectedRows>();
+auto &in_sr_value = in_var->Get<pten::SelectedRows>();

auto &in_sr_rows = in_sr_value.rows();
out_rows.insert(out_rows.end(), in_sr_rows.begin(), in_sr_rows.end());
@@ -108,7 +108,7 @@
out_var,
platform::errors::NotFound("The variable '%s' is not found in the scope.",
out_var_handle->name()));
-auto out_value = out_var->GetMutable<framework::SelectedRows>();
+auto out_value = out_var->GetMutable<pten::SelectedRows>();
out_value->set_height(pre_in_value.height());
out_value->set_rows(out_rows);
size_t rows = out_rows.size();