From 413769bb8f5193a29edee65368465eb734f68937 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Fri, 13 Aug 2021 16:28:14 +0200
Subject: [PATCH] Automatic string fixes (#8886)

---
 pytorch_lightning/core/decorators.py                      | 2 +-
 pytorch_lightning/loggers/comet.py                        | 3 +--
 pytorch_lightning/loggers/mlflow.py                       | 3 +--
 pytorch_lightning/loggers/wandb.py                        | 4 ++--
 pytorch_lightning/overrides/data_parallel.py              | 6 ++++--
 pytorch_lightning/plugins/environments/lsf_environment.py | 2 +-
 pytorch_lightning/plugins/training_type/deepspeed.py      | 2 +-
 pytorch_lightning/trainer/configuration_validator.py      | 8 ++++----
 .../trainer/connectors/accelerator_connector.py           | 3 +--
 .../trainer/connectors/checkpoint_connector.py            | 4 +---
 pytorch_lightning/trainer/connectors/data_connector.py    | 3 +--
 .../trainer/connectors/debugging_connector.py             | 3 +--
 pytorch_lightning/trainer/data_loading.py                 | 4 ++--
 pytorch_lightning/trainer/properties.py                   | 4 ++--
 pytorch_lightning/trainer/trainer.py                      | 3 +--
 pytorch_lightning/tuner/auto_gpu_select.py                | 4 ++--
 pytorch_lightning/tuner/batch_size_scaling.py             | 2 +-
 pytorch_lightning/utilities/model_summary.py              | 4 ++--
 pytorch_lightning/utilities/upgrade_checkpoint.py         | 5 +++--
 tests/tuner/test_auto_gpu_select.py                       | 8 ++++----
 20 files changed, 36 insertions(+), 41 deletions(-)

diff --git a/pytorch_lightning/core/decorators.py b/pytorch_lightning/core/decorators.py
index 864432f5db83d1..ab65b27037544b 100644
--- a/pytorch_lightning/core/decorators.py
+++ b/pytorch_lightning/core/decorators.py
@@ -97,7 +97,7 @@ def inner_fn(self, *args, **kwargs):

         if not pre_layer_count == post_layer_count:
             rank_zero_warn(
-                f"The model layers do not match after moving to the target device."
+                "The model layers do not match after moving to the target device."
                 " If your model employs weight sharing on TPU,"
                 " please tie your weights using the `on_post_move_to_device` model hook.\n"
                 f"Layer count: [Before: {pre_layer_count} After: {post_layer_count}]"
diff --git a/pytorch_lightning/loggers/comet.py b/pytorch_lightning/loggers/comet.py
index 1e2f15a068bb81..1799def445ea4a 100644
--- a/pytorch_lightning/loggers/comet.py
+++ b/pytorch_lightning/loggers/comet.py
@@ -142,8 +142,7 @@ def __init__(
     ):
         if comet_ml is None:
             raise ImportError(
-                "You want to use `comet_ml` logger which is not installed yet,"
-                " install it with `pip install comet-ml`."
+                "You want to use `comet_ml` logger which is not installed yet, install it with `pip install comet-ml`."
             )
         super().__init__()
         self._experiment = None
diff --git a/pytorch_lightning/loggers/mlflow.py b/pytorch_lightning/loggers/mlflow.py
index 006888c8c72d33..22e9865610bcdc 100644
--- a/pytorch_lightning/loggers/mlflow.py
+++ b/pytorch_lightning/loggers/mlflow.py
@@ -163,8 +163,7 @@ def experiment(self) -> MlflowClient:
                 self.tags = self.tags or {}
                 if MLFLOW_RUN_NAME in self.tags:
                     log.warning(
-                        f"The tag {MLFLOW_RUN_NAME} is found in tags. "
-                        f"The value will be overridden by {self._run_name}."
+                        f"The tag {MLFLOW_RUN_NAME} is found in tags. The value will be overridden by {self._run_name}."
                     )
                 self.tags[MLFLOW_RUN_NAME] = self._run_name
             run = self._mlflow_client.create_run(experiment_id=self._experiment_id, tags=resolve_tags(self.tags))
diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py
index e3c31c6c4cda24..b1b5d91eef7a2c 100644
--- a/pytorch_lightning/loggers/wandb.py
+++ b/pytorch_lightning/loggers/wandb.py
@@ -117,8 +117,8 @@ def __init__(
     ):
         if wandb is None:
             raise ImportError(
-                "You want to use `wandb` logger which is not installed yet,"  # pragma: no-cover
-                " install it with `pip install wandb`."
+                "You want to use `wandb` logger which is not installed yet,"
+                " install it with `pip install wandb`."  # pragma: no-cover
             )

         if offline and log_model:
diff --git a/pytorch_lightning/overrides/data_parallel.py b/pytorch_lightning/overrides/data_parallel.py
index 19e7d3e18552d8..ff9dc1077f2ded 100644
--- a/pytorch_lightning/overrides/data_parallel.py
+++ b/pytorch_lightning/overrides/data_parallel.py
@@ -27,8 +27,10 @@ def _ignore_scalar_return_in_dp():
     # Users get confused by this warning so we silence it
     warnings.filterwarnings(
         "ignore",
-        message="Was asked to gather along dimension 0, but all input tensors were scalars;"
-        " will instead unsqueeze and return a vector.",
+        message=(
+            "Was asked to gather along dimension 0, but all input tensors were scalars;"
+            " will instead unsqueeze and return a vector."
+        ),
     )

diff --git a/pytorch_lightning/plugins/environments/lsf_environment.py b/pytorch_lightning/plugins/environments/lsf_environment.py
index 997488dc358316..249cf900ab0d9d 100644
--- a/pytorch_lightning/plugins/environments/lsf_environment.py
+++ b/pytorch_lightning/plugins/environments/lsf_environment.py
@@ -125,7 +125,7 @@ def _read_hosts():
         hosts = hosts.split()
         if len(hosts) < 2:
             raise ValueError(
-                "Cannot parse hosts from LSB_HOSTS environment variable." ' Expected format: "batch ..."'
+                'Cannot parse hosts from LSB_HOSTS environment variable. Expected format: "batch ..."'
             )
         return hosts
diff --git a/pytorch_lightning/plugins/training_type/deepspeed.py b/pytorch_lightning/plugins/training_type/deepspeed.py
index 515be43caad318..8b518de6dcdf44 100644
--- a/pytorch_lightning/plugins/training_type/deepspeed.py
+++ b/pytorch_lightning/plugins/training_type/deepspeed.py
@@ -350,7 +350,7 @@ def init_ddp_connection(self, global_rank: Optional[int] = None, world_size: Opt
         world_size = world_size if world_size is not None else self.cluster_environment.world_size()
         self._set_node_environment_variables(global_rank, world_size)
         log.info(
-            f"initializing deepspeed distributed: "
+            "initializing deepspeed distributed: "
             f"GLOBAL_RANK: {global_rank}, "
             f"MEMBER: {global_rank + 1}/{world_size}"
         )
diff --git a/pytorch_lightning/trainer/configuration_validator.py b/pytorch_lightning/trainer/configuration_validator.py
index 3c9801d447dbd2..07548f9c49074a 100644
--- a/pytorch_lightning/trainer/configuration_validator.py
+++ b/pytorch_lightning/trainer/configuration_validator.py
@@ -118,13 +118,13 @@ def __verify_manual_optimization_support(self, model: "pl.LightningModule") -> N
             return
         if self.trainer.gradient_clip_val > 0:
             raise MisconfigurationException(
-                f"Automatic gradient clipping is not supported for manual optimization."
+                "Automatic gradient clipping is not supported for manual optimization."
                 f" Remove `Trainer(gradient_clip_val={self.trainer.gradient_clip_val})`"
-                f" or switch to automatic optimization."
+                " or switch to automatic optimization."
             )
         if self.trainer.accumulate_grad_batches != 1:
             raise MisconfigurationException(
-                f"Automatic gradient accumulation is not supported for manual optimization."
+                "Automatic gradient accumulation is not supported for manual optimization."
                 f" Remove `Trainer(accumulate_grad_batches={self.trainer.accumulate_grad_batches})`"
-                f" or switch to automatic optimization."
+                " or switch to automatic optimization."
             )
diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index 0f8d69706a1476..999e5eecf3a6d9 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -582,8 +582,7 @@ def select_precision_plugin(self) -> PrecisionPlugin:
             )
             if self._is_sharded_training_type or self._is_fully_sharded_training_type:
                 raise MisconfigurationException(
-                    "Sharded Plugin is not supported with Apex AMP,"
-                    " please using native AMP for 16-bit precision."
+                    "Sharded Plugin is not supported with Apex AMP, please use native AMP for 16-bit precision."
                 )
             log.info("Using APEX 16bit precision.")
             return ApexMixedPrecisionPlugin(self.amp_level)
diff --git a/pytorch_lightning/trainer/connectors/checkpoint_connector.py b/pytorch_lightning/trainer/connectors/checkpoint_connector.py
index c096f0a6093786..74b462e19a52b1 100644
--- a/pytorch_lightning/trainer/connectors/checkpoint_connector.py
+++ b/pytorch_lightning/trainer/connectors/checkpoint_connector.py
@@ -303,9 +303,7 @@ def hpc_save(self, folderpath: str, logger):
         except AttributeError as err:
             if pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:
                 del checkpoint[pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
-            rank_zero_warn(
-                "warning, `hyper_parameters` dropped from checkpoint." f" An attribute is not picklable {err}"
-            )
+            rank_zero_warn(f"warning, `hyper_parameters` dropped from checkpoint. An attribute is not picklable: {err}")

         atomic_save(checkpoint, filepath)
         return filepath
diff --git a/pytorch_lightning/trainer/connectors/data_connector.py b/pytorch_lightning/trainer/connectors/data_connector.py
index c6d471fad04b1a..9a9120be6d5934 100644
--- a/pytorch_lightning/trainer/connectors/data_connector.py
+++ b/pytorch_lightning/trainer/connectors/data_connector.py
@@ -53,8 +53,7 @@ def on_trainer_init(

         if not isinstance(reload_dataloaders_every_n_epochs, int) or (reload_dataloaders_every_n_epochs < 0):
             raise MisconfigurationException(
-                "`reload_dataloaders_every_n_epochs` should be an int >= 0,"
-                f" got {reload_dataloaders_every_n_epochs}."
+                f"`reload_dataloaders_every_n_epochs` should be an int >= 0, got {reload_dataloaders_every_n_epochs}."
             )

         self.trainer.reload_dataloaders_every_n_epochs = reload_dataloaders_every_n_epochs
diff --git a/pytorch_lightning/trainer/connectors/debugging_connector.py b/pytorch_lightning/trainer/connectors/debugging_connector.py
index 9e11fddd9ba2fd..2f71e4627a968f 100644
--- a/pytorch_lightning/trainer/connectors/debugging_connector.py
+++ b/pytorch_lightning/trainer/connectors/debugging_connector.py
@@ -35,8 +35,7 @@ def on_init_start(
     ):
         if not isinstance(fast_dev_run, (bool, int)):
             raise MisconfigurationException(
-                f"fast_dev_run={fast_dev_run} is not a valid configuration."
-                " It should be either a bool or an int >= 0"
+                f"fast_dev_run={fast_dev_run} is not a valid configuration. It should be either a bool or an int >= 0"
             )

         if isinstance(fast_dev_run, int) and (fast_dev_run < 0):
diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py
index 361d64569505d2..43fd65124e79bc 100644
--- a/pytorch_lightning/trainer/data_loading.py
+++ b/pytorch_lightning/trainer/data_loading.py
@@ -111,7 +111,7 @@ def _worker_check(self, dataloader: DataLoader, name: str) -> None:
                 f"The dataloader, {name}, does not have many workers which may be a bottleneck."
                 " Consider increasing the value of the `num_workers` argument`"
                 f" (try {num_cpus} which is the number of cpus on this machine)"
-                f" in the `DataLoader` init to improve performance."
+                " in the `DataLoader` init to improve performance."
             )

     def auto_add_worker_init_fn(self, dataloader: DataLoader) -> None:
@@ -347,7 +347,7 @@ def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) -
             rank_zero_warn(
                 f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval"
                 f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
-                f" you want to see logs for the training epoch."
+                " you want to see logs for the training epoch."
             )

     def _reset_eval_dataloader(
diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py
index 097138d9ad5080..5e16ba57464f5c 100644
--- a/pytorch_lightning/trainer/properties.py
+++ b/pytorch_lightning/trainer/properties.py
@@ -306,8 +306,8 @@ def progress_bar_dict(self) -> dict:
             rank_zero_warn(
                 f"The progress bar already tracks a metric with the name(s) '{', '.join(duplicates)}' and"
                 f" `self.log('{duplicates[0]}', ..., prog_bar=True)` will overwrite this value. "
-                f" If this is undesired, change the name or override `get_progress_bar_dict()`"
-                f" in `LightingModule`.",
+                " If this is undesired, change the name or override `get_progress_bar_dict()`"
+                " in `LightningModule`.",
                 UserWarning,
             )
         return {**standard_metrics, **pbar_metrics}
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index c9eca4da38c4ba..d129c21840b4c4 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -1312,8 +1312,7 @@ def _log_device_info(self) -> None:

         if torch.cuda.is_available() and self._device_type != DeviceType.GPU:
             rank_zero_warn(
-                "GPU available but not used. Set the gpus flag in your trainer"
-                " `Trainer(gpus=1)` or script `--gpus=1`."
+                "GPU available but not used. Set the gpus flag in your trainer `Trainer(gpus=1)` or script `--gpus=1`."
             )

         if _TPU_AVAILABLE and self._device_type != DeviceType.TPU:
diff --git a/pytorch_lightning/tuner/auto_gpu_select.py b/pytorch_lightning/tuner/auto_gpu_select.py
index 8e0b5ad68b689e..ccf969196f0b2c 100644
--- a/pytorch_lightning/tuner/auto_gpu_select.py
+++ b/pytorch_lightning/tuner/auto_gpu_select.py
@@ -24,8 +24,8 @@ def pick_multiple_gpus(nb):
     """
     if nb == 0:
         raise MisconfigurationException(
-            r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
-                Please select a valid number of GPU resources when using auto_select_gpus."
+            "auto_select_gpus=True, gpus=0 is not a valid configuration."
+            " Please select a valid number of GPU resources when using auto_select_gpus."
         )

     nb = torch.cuda.device_count() if nb == -1 else nb
diff --git a/pytorch_lightning/tuner/batch_size_scaling.py b/pytorch_lightning/tuner/batch_size_scaling.py
index 0bb9fd45107d28..1eda93cd831b3e 100644
--- a/pytorch_lightning/tuner/batch_size_scaling.py
+++ b/pytorch_lightning/tuner/batch_size_scaling.py
@@ -47,7 +47,7 @@ def scale_batch_size(
         rank_zero_warn(
             f"Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!"
             f" `model.{batch_arg_name}` will be used as the initial batch size for scaling."
-            f" If this is not the intended behavior, please remove either one."
+            " If this is not the intended behavior, please remove either one."
         )

     if hasattr(model.train_dataloader, "patch_loader_code"):
diff --git a/pytorch_lightning/utilities/model_summary.py b/pytorch_lightning/utilities/model_summary.py
index 4834da54220ef6..ff008683152164 100644
--- a/pytorch_lightning/utilities/model_summary.py
+++ b/pytorch_lightning/utilities/model_summary.py
@@ -199,7 +199,7 @@ def __init__(self, model, mode: Optional[str] = None, max_depth: Optional[int] =
         if mode in ModelSummary.MODES:
             max_depth = ModelSummary.MODES[mode]
             rank_zero_deprecation(
-                f"Argument `mode` in `ModelSummary` is deprecated in v1.4"
+                "Argument `mode` in `ModelSummary` is deprecated in v1.4"
                 f" and will be removed in v1.6. Use `max_depth={max_depth}` to replicate `mode={mode}` behaviour."
             )
         else:
@@ -459,7 +459,7 @@ def summarize(
     if mode in ModelSummary.MODES:
         max_depth = ModelSummary.MODES[mode]
         rank_zero_deprecation(
-            f"Argument `mode` in `LightningModule.summarize` is deprecated in v1.4"
+            "Argument `mode` in `LightningModule.summarize` is deprecated in v1.4"
             f" and will be removed in v1.6. Use `max_depth={max_depth}` to replicate `mode={mode}` behavior."
         )
     model_summary = ModelSummary(lightning_module, max_depth=max_depth)
diff --git a/pytorch_lightning/utilities/upgrade_checkpoint.py b/pytorch_lightning/utilities/upgrade_checkpoint.py
index 4896845f102630..ddff8ee1d5ab30 100644
--- a/pytorch_lightning/utilities/upgrade_checkpoint.py
+++ b/pytorch_lightning/utilities/upgrade_checkpoint.py
@@ -48,8 +48,9 @@ def upgrade_checkpoint(filepath):

 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description="Upgrade an old checkpoint to the current schema. \
-        This will also save a backup of the original file."
+        description=(
+            "Upgrade an old checkpoint to the current schema. This will also save a backup of the original file."
+        )
     )

     parser.add_argument("--file", help="filepath for a checkpoint to upgrade")
diff --git a/tests/tuner/test_auto_gpu_select.py b/tests/tuner/test_auto_gpu_select.py
index d0f31975317647..c8b510f65e980e 100644
--- a/tests/tuner/test_auto_gpu_select.py
+++ b/tests/tuner/test_auto_gpu_select.py
@@ -32,8 +32,8 @@ def test_trainer_with_gpus_options_combination_at_available_gpus_env(auto_select
     with pytest.raises(
         expected_error,
         match=re.escape(
-            r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
-                Please select a valid number of GPU resources when using auto_select_gpus."
+            "auto_select_gpus=True, gpus=0 is not a valid configuration."
+            " Please select a valid number of GPU resources when using auto_select_gpus."
         ),
     ):
         Trainer(auto_select_gpus=auto_select_gpus, gpus=gpus)
@@ -51,8 +51,8 @@ def test_pick_multiple_gpus(nb, expected_gpu_idxs, expected_error):
     with pytest.raises(
         expected_error,
         match=re.escape(
-            r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
-                Please select a valid number of GPU resources when using auto_select_gpus."
+ "auto_select_gpus=True, gpus=0 is not a valid configuration." + " Please select a valid number of GPU resources when using auto_select_gpus." ), ): pick_multiple_gpus(nb)