bug fix (PaddlePaddle#8238)
FeixLiu authored Apr 9, 2024
1 parent 70f4a6f commit c1aad02
Showing 1 changed file with 1 addition and 1 deletion.
2 changes: 1 addition & 1 deletion paddlenlp/trainer/trainer.py
@@ -413,9 +413,9 @@ def _wrap_amp_model(self, args, model):
         self.scaler = paddle.amp.GradScaler(init_loss_scaling=self.args.scale_loss)
         if self.amp_dtype == "float16" or self.amp_dtype == "bfloat16":
             if ShardingOption.SHARD_OP in self.args.sharding:
-                self.scaler = fleet.distributed_scaler(self.scaler)
                 if self.args.amp_master_grad:
                     mix_precision_utils.MixPrecisionScaler(self.scaler)  # return value is not used
+                self.scaler = fleet.distributed_scaler(self.scaler)
             else:
                 # scaler for stage2 and stage3
                 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
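
For reference, a minimal sketch of the corrected wrapping order under sharding stage 1 (SHARD_OP). The helper name wrap_scaler_for_shard_op is hypothetical, and the import paths are assumptions based on common Paddle usage and the names visible in this diff; the apparent intent of the reordering is that MixPrecisionScaler patches the base GradScaler in place, so it should run before the scaler is handed to fleet.distributed_scaler.

import paddle
from paddle.distributed import fleet
from paddle.distributed.fleet.utils import mix_precision_utils


def wrap_scaler_for_shard_op(args, scale_loss):
    # Base AMP loss scaler for float16/bfloat16 training.
    scaler = paddle.amp.GradScaler(init_loss_scaling=scale_loss)

    if args.amp_master_grad:
        # Called for its side effect on `scaler`; the return value is not used,
        # mirroring the comment in trainer.py.
        mix_precision_utils.MixPrecisionScaler(scaler)

    # Wrap for sharding stage 1 only after the master-grad patch above,
    # which is the reordering this commit applies.
    scaler = fleet.distributed_scaler(scaler)
    return scaler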
