From 53628aa92bb8dae9c93641d7a6acc1f8edc01176 Mon Sep 17 00:00:00 2001
From: lugimzzz
Date: Tue, 22 Oct 2024 16:55:08 +0800
Subject: [PATCH] fix lora sharding v2

---
 llm/run_finetune.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/llm/run_finetune.py b/llm/run_finetune.py
index 265036207830..c32719632702 100644
--- a/llm/run_finetune.py
+++ b/llm/run_finetune.py
@@ -109,6 +109,7 @@ def main():
     if get_env_device() == "xpu" and training_args.gradient_accumulation_steps > 1:
         try:
             from paddle_xpu.layers.nn.linear import LinearConfig  # noqa: F401
+
             LinearConfig.enable_accumulate_steps_opt()
             LinearConfig.set_accumulate_steps(training_args.gradient_accumulation_steps)
         except ImportError:
@@ -559,6 +560,8 @@ def compute_metrics_do_generation(eval_preds):
         gen_args=gen_args,
         data_args=data_args,
     )
+    trainable_parameters = [p for p in model.parameters() if not p.stop_gradient]
+    trainer.set_optimizer_grouped_parameters(trainable_parameters)

     # Train
     if training_args.do_train:
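
The second hunk is the actual LoRA sharding fix: it hands the trainer only the parameters that still receive gradients, so the sharded optimizer does not build state for frozen base weights. Below is a minimal sketch of that filtering step, assuming a plain paddle.nn.Linear stands in for the LoRA-wrapped model; the real patch passes the resulting list to PaddleNLP's trainer.set_optimizer_grouped_parameters.

# Minimal sketch, not the PR itself: Paddle marks frozen weights with
# stop_gradient=True, and the patch's list comprehension keeps only the
# still-trainable (LoRA-style) parameters.
import paddle

model = paddle.nn.Linear(8, 8)
model.bias.stop_gradient = True  # freeze the bias to mimic a frozen base weight

trainable_parameters = [p for p in model.parameters() if not p.stop_gradient]
print([p.name for p in trainable_parameters])  # only the unfrozen weight remains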