Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions .github/workflows/Coverage.yml
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,6 @@ jobs:
CCACHE_LIMIT_MULTIPLE: 0.8
ON_INFER: "ON"
PADDLE_CUDA_INSTALL_REQUIREMENTS: "ON"
FLAGS_enable_unused_var_check: 1
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
UT_RUN_TYPE_SETTING: WITHOUT_HYBRID
Expand Down Expand Up @@ -124,7 +123,6 @@ jobs:
-e CCACHE_LIMIT_MULTIPLE \
-e ON_INFER \
-e PADDLE_CUDA_INSTALL_REQUIREMENTS \
-e FLAGS_enable_unused_var_check \
-e GITHUB_TOKEN \
-e GITHUB_API_TOKEN \
-e UT_RUN_TYPE_SETTING \
Expand Down
18 changes: 0 additions & 18 deletions paddle/fluid/framework/operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,6 @@ class DenseTensor;

COMMON_DECLARE_bool(benchmark);
COMMON_DECLARE_bool(check_nan_inf);
PD_DECLARE_bool(enable_unused_var_check);
COMMON_DECLARE_bool(run_kp_kernel);
PHI_DECLARE_bool(enable_host_event_recorder_hook);

Expand Down Expand Up @@ -1153,8 +1152,6 @@ bool ExecutionContext::HasOutput(const std::string& name) const {
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
LogVarUsageIfUnusedVarCheckEnabled(name);

auto it = ctx_.inputs.find(name);
if (it == ctx_.inputs.end()) return nullptr;

Expand Down Expand Up @@ -1185,8 +1182,6 @@ Variable* ExecutionContext::OutputVar(const std::string& name) const {
template <>
const std::vector<const phi::DenseTensor*>
ExecutionContext::MultiInput<phi::DenseTensor>(const std::string& name) const {
LogVarUsageIfUnusedVarCheckEnabled(name);

auto vars = MultiInputVar(name);
if (vars.empty()) {
return {};
Expand Down Expand Up @@ -2046,10 +2041,6 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
Type(), Attrs(), infer_shape_ctx, *runtime_ctx, Id());
}

if (FLAGS_enable_unused_var_check) {
GetThreadLocalUsedVarNameSet()->clear();
}

// TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
// not Scope. Imperative mode only pass inputs and get outputs.
{
Expand Down Expand Up @@ -2121,15 +2112,6 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
HandleComplexGradToRealGrad(scope, runtime_ctx);
}

if (FLAGS_enable_unused_var_check) {
// skip op that uses onednn because it has different memory reuse strategy.
// use attr here because some GradMakers (like ActivationGradOpMaker) add
// input when use_mkldnn=true;
if (!(HasAttr("use_mkldnn") && Attr<bool>("use_mkldnn"))) {
CheckUnusedVar(*this, scope);
}
}

/*For profiling/benchmark only*/
if (FLAGS_benchmark) {
dev_ctx->Wait();
Expand Down
4 changes: 0 additions & 4 deletions paddle/fluid/framework/operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -494,8 +494,6 @@ class ExecutionContext : public phi::KernelContext {

virtual const std::vector<Variable*> MultiInputVar(
const std::string& name) const {
LogVarUsageIfUnusedVarCheckEnabled(name);

auto it = ctx_.inputs.find(name);
if (it == ctx_.inputs.end()) {
return {};
Expand Down Expand Up @@ -536,8 +534,6 @@ class ExecutionContext : public phi::KernelContext {

template <typename T>
const std::vector<const T*> MultiInput(const std::string& name) const {
LogVarUsageIfUnusedVarCheckEnabled(name);

auto vars = MultiInputVar(name);
if (vars.size() == 0) {
return {};
Expand Down
12 changes: 0 additions & 12 deletions paddle/fluid/framework/unused_var_check.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,6 @@ limitations under the License. */
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/enforce.h"
PHI_DEFINE_EXPORTED_bool(
enable_unused_var_check,
false,
"Checking whether operator contains unused inputs, "
"especially for grad operator. It should be in unittest.");

namespace paddle::framework {

Expand All @@ -36,13 +31,6 @@ std::unordered_set<std::string> *GetThreadLocalUsedVarNameSet() {
return &used_var_name_set;
}

// Records that variable `name` was accessed by the current operator, but only
// when FLAGS_enable_unused_var_check is on. The thread-local set collected
// here is later compared against the operator's declared inputs to detect
// unused variables.
void LogVarUsageIfUnusedVarCheckEnabled(const std::string &name) {
  if (!FLAGS_enable_unused_var_check) {
    return;
  }
  VLOG(6) << "Variable used:" << name;
  GetThreadLocalUsedVarNameSet()->insert(name);
}

static const std::unordered_set<std::string> &GetOpWithUnusedVarAllowSet() {
// NOTE(zhiqiu): Currently, there are some operators which involves unused
// inputs and cannot be removed from the allow_list below.
Expand Down
1 change: 0 additions & 1 deletion paddle/fluid/framework/unused_var_check.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@ class Scope;

std::unordered_set<std::string>* GetThreadLocalUsedVarNameSet();

void LogVarUsageIfUnusedVarCheckEnabled(const std::string& name);
void CheckUnusedVar(const OperatorBase& op, const Scope& scope);

} // namespace framework
Expand Down
19 changes: 6 additions & 13 deletions paddle/phi/core/compat/op_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -82,13 +82,9 @@ class OpUtilsMap {
fluid_op_to_phi_kernel_.insert({op_type, base_kernel_name});
}
void InsertFluidOplName(std::string op_type, std::string base_kernel_name) {
PADDLE_ENFORCE_EQ(
phi_kernel_to_fluid_op_.count(base_kernel_name),
0UL,
common::errors::AlreadyExists(
"Operator (%s)'s kernel name (%s) has been registered.",
op_type,
base_kernel_name));
if (phi_kernel_to_fluid_op_.count(base_kernel_name)) {
return;
}
phi_kernel_to_fluid_op_.insert({base_kernel_name, op_type});
}

Expand All @@ -97,12 +93,9 @@ class OpUtilsMap {
}

void InsertArgumentMappingFn(std::string op_type, ArgumentMappingFn fn) {
PADDLE_ENFORCE_EQ(
arg_mapping_fn_map_.count(op_type),
0UL,
common::errors::AlreadyExists(
"Operator (%s)'s argument mapping function has been registered.",
op_type));
if (arg_mapping_fn_map_.count(op_type)) {
return;
}
arg_mapping_fn_map_.insert({std::move(op_type), std::move(fn)});
}

Expand Down
9 changes: 0 additions & 9 deletions paddle/scripts/paddle_build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3355,12 +3355,6 @@ function nv_test() {
}


function enable_unused_var_check() {
    # NOTE(zhiqiu): Set FLAGS_enable_unused_var_check=1 here to enable unused_var_check,
    # which checks if an operator has unused input variable(s).
    # Currently, use it in coverage CI job.
    # The flag is read by the fluid operator framework at kernel-run time.
    export FLAGS_enable_unused_var_check=1
}
function check_coverage_added_ut() {
# NOTE(risemeup1):The step of checking added test can be placed on the cpu machine to save gpu resources
bash $PADDLE_ROOT/tools/check_added_ut.sh
Expand Down Expand Up @@ -4829,21 +4823,18 @@ function main() {
cicheck)
cmake_gen ${PYTHON_ABI:-""}
build ${parallel_number}
enable_unused_var_check
parallel_test
;;
cicheck_coverage)
check_diff_file_for_coverage
run_setup ${PYTHON_ABI:-""} install ${parallel_number}
enable_unused_var_check
parallel_test
check_coverage
;;
cpu_cicheck_coverage)
check_diff_file_for_coverage
export ON_INFER=ON PADDLE_CUDA_INSTALL_REQUIREMENTS=ON
run_setup ${PYTHON_ABI:-""} bdist_wheel ${parallel_number}
enable_unused_var_check
check_coverage_added_ut
check_coverage_build
clean_build_files
Expand Down
31 changes: 0 additions & 31 deletions test/cpp/fluid/framework/operator_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,6 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/init.h"

PD_DECLARE_bool(enable_unused_var_check);

namespace paddle {
namespace framework {

Expand Down Expand Up @@ -612,35 +610,7 @@ REGISTER_OP_WITHOUT_GRADIENT(
REGISTER_OP_CPU_KERNEL(op_without_unused_var,
paddle::framework::OpWithoutUnusedVarKernelTest<float>);

// Single-input case: running an operator whose kernel never reads its
// declared input "X" must trip the unused-variable check and throw.
TEST(OpWithUnusedVar, all) {
  // Turn the check on for the duration of this test.
  FLAGS_enable_unused_var_check = true;
  paddle::framework::InitDevices();

  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("op_with_unused_var");
  BuildVar("X", {"X"}, op_desc.add_inputs());
  BuildVar("Y", {"Y"}, op_desc.add_outputs());

  paddle::framework::Scope scope;
  auto* input = scope.Var("X")->GetMutable<phi::DenseTensor>();
  auto* output = scope.Var("Y")->GetMutable<phi::DenseTensor>();
  phi::CPUPlace place;
  input->Resize({32, 64});
  output->Resize({32, 64});
  input->mutable_data<float>(place);
  output->mutable_data<float>(place);

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // The unused input "X" should cause the run to fail with EnforceNotMet.
  ASSERT_THROW(op->Run(scope, place), paddle::platform::EnforceNotMet);
  // Restore the flag so later tests are unaffected.
  FLAGS_enable_unused_var_check = false;
}

TEST(OpWithoutUnusedVar, all) {
// enable the unused_var_check
FLAGS_enable_unused_var_check = true;

paddle::framework::InitDevices();
paddle::framework::proto::OpDesc op_desc;
op_desc.set_type("op_without_unused_var");
Expand All @@ -659,5 +629,4 @@ TEST(OpWithoutUnusedVar, all) {
auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
// should not throw exception
ASSERT_NO_THROW(op->Run(scope, cpu_place));
FLAGS_enable_unused_var_check = false;
}
Loading