diff --git a/paddlenlp/trainer/trainer.py b/paddlenlp/trainer/trainer.py
index 70ce9033842b..fd71273429a8 100644
--- a/paddlenlp/trainer/trainer.py
+++ b/paddlenlp/trainer/trainer.py
@@ -626,7 +626,7 @@ def train(
         # The resume_from_checkpoint could be None in some machine node.
         # Here we reset None to temp directory.
         if args.world_size > 1:
-            is_resume_from_checkpoint = paddle.to_tensor([resume_from_checkpoint is not None])
+            is_resume_from_checkpoint = paddle.to_tensor([resume_from_checkpoint is not None], dtype="int32")
             paddle.distributed.all_reduce(is_resume_from_checkpoint)
             is_resume_from_checkpoint = is_resume_from_checkpoint.item()
             if is_resume_from_checkpoint > 0 and is_resume_from_checkpoint < paddle.distributed.get_world_size():
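
For context, a minimal standalone sketch of the cross-rank check this hunk touches, assuming `paddle.distributed` has already been initialized (e.g. via `paddle.distributed.init_parallel_env()`); the helper name and the comments are illustrative, not part of the Trainer API:

```python
import paddle
import paddle.distributed as dist


def count_ranks_with_checkpoint(resume_from_checkpoint):
    # Encode the local "do I have a checkpoint path?" flag as an int32 tensor.
    # Without the explicit dtype, paddle.to_tensor([bool]) yields a bool tensor,
    # which is presumably why the diff pins dtype="int32" before all_reduce.
    has_ckpt = paddle.to_tensor([resume_from_checkpoint is not None], dtype="int32")
    dist.all_reduce(has_ckpt)  # default reduce op is SUM
    count = has_ckpt.item()
    # count == 0: no rank has a checkpoint; count == world_size: every rank does;
    # anything in between means only some ranks found one and they must reconcile.
    return count


# Usage sketch, mirroring the condition in the hunk:
# if 0 < count_ranks_with_checkpoint(path) < dist.get_world_size():
#     ...  # fall back to a shared temp directory, as the Trainer does
```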