diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index 08c83f9f7fa9..0d0d8403b4e3 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -1638,7 +1638,7 @@ def _load_rng_state(self, checkpoint):
             try:
                 torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
             except Exception as e:
-                logger.infor(
+                logger.info(
                     f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}"
                     "\nThis won't yield the same results as if the training had not been interrupted."
                 )
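
For context, below is a minimal, self-contained sketch of the pattern this hunk sits in: restoring RNG states from a checkpoint and degrading to a log message (rather than crashing) when the GPU states cannot be restored. This is an illustrative standalone function, not the Trainer's actual implementation; the `rng_state.pth` file name and the `python`/`numpy`/`cpu`/`cuda` keys follow the Trainer's checkpoint convention, while everything else is an assumption for the sketch.

import logging
import os
import random

import numpy as np
import torch

logger = logging.getLogger(__name__)


def load_rng_state(checkpoint: str) -> None:
    """Restore Python/NumPy/torch RNG states saved under `checkpoint`/rng_state.pth.

    Hypothetical helper mirroring the shape of Trainer._load_rng_state.
    """
    rng_file = os.path.join(checkpoint, "rng_state.pth")
    if not os.path.isfile(rng_file):
        # No saved state: resume with whatever seeds are currently set.
        logger.info("No RNG state found at %s, resuming with fresh seeds.", rng_file)
        return

    checkpoint_rng_state = torch.load(rng_file)
    random.setstate(checkpoint_rng_state["python"])
    np.random.set_state(checkpoint_rng_state["numpy"])
    torch.random.set_rng_state(checkpoint_rng_state["cpu"])

    if torch.cuda.is_available() and "cuda" in checkpoint_rng_state:
        try:
            # Restoring per-device states can fail, e.g. if the number of
            # visible GPUs changed between the interrupted and resumed runs.
            torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
        except Exception as e:
            logger.info(
                f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}"
                "\nThis won't yield the same results as if the training had not been interrupted."
            )

Note the design choice the fixed call preserves: the GPU RNG restore is best-effort, so a failure only costs bit-exact reproducibility of the resumed run, which is why the except branch logs at info level instead of raising (the typo `logger.infor` would have turned that graceful fallback into an AttributeError).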