Automatic string fixes (Lightning-AI#8886)
carmocca authored and four4fish committed Aug 16, 2021
1 parent 7cd7b9b commit 413769b
Showing 20 changed files with 36 additions and 41 deletions.
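Every hunk below leans on the same Python rule: adjacent string literals are concatenated at compile time, and the `f` prefix is only needed on fragments that actually contain `{...}` placeholders; an `f` on a placeholder-free literal is redundant (flake8 reports it as F541). A minimal sketch of the pattern, with example values:

```python
# Adjacent literals are joined at compile time; no "+" is needed, and only
# the fragment with {placeholders} needs the f prefix.
pre_layer_count, post_layer_count = 10, 12  # example values

message = (
    "The model layers do not match after moving to the target device."
    f" Layer count: [Before: {pre_layer_count} After: {post_layer_count}]"
)
assert message == (
    "The model layers do not match after moving to the target device."
    " Layer count: [Before: 10 After: 12]"
)
```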
2 changes: 1 addition & 1 deletion pytorch_lightning/core/decorators.py

```diff
@@ -97,7 +97,7 @@ def inner_fn(self, *args, **kwargs):
 
         if not pre_layer_count == post_layer_count:
             rank_zero_warn(
-                f"The model layers do not match after moving to the target device."
+                "The model layers do not match after moving to the target device."
                 " If your model employs weight sharing on TPU,"
                 " please tie your weights using the `on_post_move_to_device` model hook.\n"
                 f"Layer count: [Before: {pre_layer_count} After: {post_layer_count}]"
```
3 changes: 1 addition & 2 deletions pytorch_lightning/loggers/comet.py

```diff
@@ -142,8 +142,7 @@ def __init__(
     ):
         if comet_ml is None:
             raise ImportError(
-                "You want to use `comet_ml` logger which is not installed yet,"
-                " install it with `pip install comet-ml`."
+                "You want to use `comet_ml` logger which is not installed yet, install it with `pip install comet-ml`."
             )
         super().__init__()
         self._experiment = None
```
3 changes: 1 addition & 2 deletions pytorch_lightning/loggers/mlflow.py

```diff
@@ -163,8 +163,7 @@ def experiment(self) -> MlflowClient:
         self.tags = self.tags or {}
         if MLFLOW_RUN_NAME in self.tags:
             log.warning(
-                f"The tag {MLFLOW_RUN_NAME} is found in tags. "
-                f"The value will be overridden by {self._run_name}."
+                f"The tag {MLFLOW_RUN_NAME} is found in tags. The value will be overridden by {self._run_name}."
             )
         self.tags[MLFLOW_RUN_NAME] = self._run_name
         run = self._mlflow_client.create_run(experiment_id=self._experiment_id, tags=resolve_tags(self.tags))
```
4 changes: 2 additions & 2 deletions pytorch_lightning/loggers/wandb.py

```diff
@@ -117,8 +117,8 @@ def __init__(
     ):
         if wandb is None:
             raise ImportError(
-                "You want to use `wandb` logger which is not installed yet,"  # pragma: no-cover
-                " install it with `pip install wandb`."
+                "You want to use `wandb` logger which is not installed yet,"
+                " install it with `pip install wandb`."  # pragma: no-cover
             )
 
         if offline and log_model:
```
6 changes: 4 additions & 2 deletions pytorch_lightning/overrides/data_parallel.py

```diff
@@ -27,8 +27,10 @@ def _ignore_scalar_return_in_dp():
     # Users get confused by this warning so we silence it
     warnings.filterwarnings(
         "ignore",
-        message="Was asked to gather along dimension 0, but all input tensors were scalars;"
-        " will instead unsqueeze and return a vector.",
+        message=(
+            "Was asked to gather along dimension 0, but all input tensors were scalars;"
+            " will instead unsqueeze and return a vector."
+        ),
     )
 
 
```
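Wrapping the fragments in parentheses does not change the resulting string; it makes the implicit concatenation explicit inside the keyword argument and guards against a stray comma quietly turning the message into a tuple. A small sketch of the difference:

```python
# One string, built from two adjacent literals:
msg = (
    "Was asked to gather along dimension 0, but all input tensors were scalars;"
    " will instead unsqueeze and return a vector."
)
assert isinstance(msg, str)

# A stray comma between the fragments builds a 2-tuple instead, which
# warnings.filterwarnings(message=...) would reject when compiling the regex:
oops = (
    "Was asked to gather along dimension 0, but all input tensors were scalars;",
    " will instead unsqueeze and return a vector.",
)
assert isinstance(oops, tuple)
```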
2 changes: 1 addition & 1 deletion pytorch_lightning/plugins/environments/lsf_environment.py

```diff
@@ -125,7 +125,7 @@ def _read_hosts():
         hosts = hosts.split()
         if len(hosts) < 2:
             raise ValueError(
-                "Cannot parse hosts from LSB_HOSTS environment variable." ' Expected format: "batch <rank_0_host> ..."'
+                'Cannot parse hosts from LSB_HOSTS environment variable. Expected format: "batch <rank_0_host> ..."'
             )
         return hosts
 
```
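A note on the quote swap: the message embeds double quotes, so once the two fragments merge into one literal, single quotes keep it readable without escaping. Both spellings denote the same text:

```python
mixed = "Cannot parse hosts from LSB_HOSTS environment variable." ' Expected format: "batch <rank_0_host> ..."'
clean = 'Cannot parse hosts from LSB_HOSTS environment variable. Expected format: "batch <rank_0_host> ..."'
assert mixed == clean  # identical strings, one consistent quote style
```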
2 changes: 1 addition & 1 deletion pytorch_lightning/plugins/training_type/deepspeed.py

```diff
@@ -350,7 +350,7 @@ def init_ddp_connection(self, global_rank: Optional[int] = None, world_size: Opt
         world_size = world_size if world_size is not None else self.cluster_environment.world_size()
         self._set_node_environment_variables(global_rank, world_size)
         log.info(
-            f"initializing deepspeed distributed: "
+            "initializing deepspeed distributed: "
             f"GLOBAL_RANK: {global_rank}, "
             f"MEMBER: {global_rank + 1}/{world_size}"
         )
```
8 changes: 4 additions & 4 deletions pytorch_lightning/trainer/configuration_validator.py

```diff
@@ -118,13 +118,13 @@ def __verify_manual_optimization_support(self, model: "pl.LightningModule") -> N
             return
         if self.trainer.gradient_clip_val > 0:
             raise MisconfigurationException(
-                f"Automatic gradient clipping is not supported for manual optimization."
+                "Automatic gradient clipping is not supported for manual optimization."
                 f" Remove `Trainer(gradient_clip_val={self.trainer.gradient_clip_val})`"
-                f" or switch to automatic optimization."
+                " or switch to automatic optimization."
             )
         if self.trainer.accumulate_grad_batches != 1:
             raise MisconfigurationException(
-                f"Automatic gradient accumulation is not supported for manual optimization."
+                "Automatic gradient accumulation is not supported for manual optimization."
                 f" Remove `Trainer(accumulate_grad_batches={self.trainer.accumulate_grad_batches})`"
-                f" or switch to automatic optimization."
+                " or switch to automatic optimization."
             )
```
```diff
@@ -582,8 +582,7 @@ def select_precision_plugin(self) -> PrecisionPlugin:
             )
             if self._is_sharded_training_type or self._is_fully_sharded_training_type:
                 raise MisconfigurationException(
-                    "Sharded Plugin is not supported with Apex AMP,"
-                    " please using native AMP for 16-bit precision."
+                    "Sharded Plugin is not supported with Apex AMP, please using native AMP for 16-bit precision."
                 )
             log.info("Using APEX 16bit precision.")
             return ApexMixedPrecisionPlugin(self.amp_level)
```
4 changes: 1 addition & 3 deletions pytorch_lightning/trainer/connectors/checkpoint_connector.py

```diff
@@ -303,9 +303,7 @@ def hpc_save(self, folderpath: str, logger):
         except AttributeError as err:
             if pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:
                 del checkpoint[pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
-            rank_zero_warn(
-                "warning, `hyper_parameters` dropped from checkpoint." f" An attribute is not picklable {err}"
-            )
+            rank_zero_warn(f"warning, `hyper_parameters` dropped from checkpoint. An attribute is not picklable {err}")
             atomic_save(checkpoint, filepath)
 
         return filepath
```
3 changes: 1 addition & 2 deletions pytorch_lightning/trainer/connectors/data_connector.py

```diff
@@ -53,8 +53,7 @@ def on_trainer_init(
 
         if not isinstance(reload_dataloaders_every_n_epochs, int) or (reload_dataloaders_every_n_epochs < 0):
             raise MisconfigurationException(
-                "`reload_dataloaders_every_n_epochs` should be an int >= 0,"
-                f" got {reload_dataloaders_every_n_epochs}."
+                f"`reload_dataloaders_every_n_epochs` should be an int >= 0, got {reload_dataloaders_every_n_epochs}."
             )
 
         self.trainer.reload_dataloaders_every_n_epochs = reload_dataloaders_every_n_epochs
```
3 changes: 1 addition & 2 deletions pytorch_lightning/trainer/connectors/debugging_connector.py

```diff
@@ -35,8 +35,7 @@ def on_init_start(
     ):
         if not isinstance(fast_dev_run, (bool, int)):
             raise MisconfigurationException(
-                f"fast_dev_run={fast_dev_run} is not a valid configuration."
-                " It should be either a bool or an int >= 0"
+                f"fast_dev_run={fast_dev_run} is not a valid configuration. It should be either a bool or an int >= 0"
             )
 
         if isinstance(fast_dev_run, int) and (fast_dev_run < 0):
```
4 changes: 2 additions & 2 deletions pytorch_lightning/trainer/data_loading.py

```diff
@@ -111,7 +111,7 @@ def _worker_check(self, dataloader: DataLoader, name: str) -> None:
                 f"The dataloader, {name}, does not have many workers which may be a bottleneck."
                 " Consider increasing the value of the `num_workers` argument`"
                 f" (try {num_cpus} which is the number of cpus on this machine)"
-                f" in the `DataLoader` init to improve performance."
+                " in the `DataLoader` init to improve performance."
             )
 
     def auto_add_worker_init_fn(self, dataloader: DataLoader) -> None:
@@ -347,7 +347,7 @@ def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) -
             rank_zero_warn(
                 f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval"
                 f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
-                f" you want to see logs for the training epoch."
+                " you want to see logs for the training epoch."
             )
 
     def _reset_eval_dataloader(
```
4 changes: 2 additions & 2 deletions pytorch_lightning/trainer/properties.py

```diff
@@ -306,8 +306,8 @@ def progress_bar_dict(self) -> dict:
             rank_zero_warn(
                 f"The progress bar already tracks a metric with the name(s) '{', '.join(duplicates)}' and"
                 f" `self.log('{duplicates[0]}', ..., prog_bar=True)` will overwrite this value. "
-                f" If this is undesired, change the name or override `get_progress_bar_dict()`"
-                f" in `LightingModule`.",
+                " If this is undesired, change the name or override `get_progress_bar_dict()`"
+                " in `LightingModule`.",
                 UserWarning,
             )
         return {**standard_metrics, **pbar_metrics}
```
3 changes: 1 addition & 2 deletions pytorch_lightning/trainer/trainer.py

```diff
@@ -1312,8 +1312,7 @@ def _log_device_info(self) -> None:
 
         if torch.cuda.is_available() and self._device_type != DeviceType.GPU:
             rank_zero_warn(
-                "GPU available but not used. Set the gpus flag in your trainer"
-                " `Trainer(gpus=1)` or script `--gpus=1`."
+                "GPU available but not used. Set the gpus flag in your trainer `Trainer(gpus=1)` or script `--gpus=1`."
             )
 
         if _TPU_AVAILABLE and self._device_type != DeviceType.TPU:
```
4 changes: 2 additions & 2 deletions pytorch_lightning/tuner/auto_gpu_select.py

```diff
@@ -24,8 +24,8 @@ def pick_multiple_gpus(nb):
     """
     if nb == 0:
         raise MisconfigurationException(
-            r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
-            Please select a valid number of GPU resources when using auto_select_gpus."
+            "auto_select_gpus=True, gpus=0 is not a valid configuration."
+            " Please select a valid number of GPU resources when using auto_select_gpus."
         )
 
     nb = torch.cuda.device_count() if nb == -1 else nb
```
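This hunk fixes an actual bug, not just style: in a raw string a trailing backslash does not continue the line, so the backslash, the newline, and the next line's indentation all ended up inside the error message. A sketch of what the old literal contained (indentation approximated):

```python
# Raw string: "\" + newline is kept verbatim, it is NOT a line continuation.
old = r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
    Please select a valid number of GPU resources when using auto_select_gpus."
new = (
    "auto_select_gpus=True, gpus=0 is not a valid configuration."
    " Please select a valid number of GPU resources when using auto_select_gpus."
)
assert "\\\n" in old  # literal backslash + newline leaked into the text
assert "\\" not in new and "\n" not in new
```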
2 changes: 1 addition & 1 deletion pytorch_lightning/tuner/batch_size_scaling.py

```diff
@@ -47,7 +47,7 @@ def scale_batch_size(
         rank_zero_warn(
             f"Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!"
             f" `model.{batch_arg_name}` will be used as the initial batch size for scaling."
-            f" If this is not the intended behavior, please remove either one."
+            " If this is not the intended behavior, please remove either one."
         )
 
     if hasattr(model.train_dataloader, "patch_loader_code"):
```
4 changes: 2 additions & 2 deletions pytorch_lightning/utilities/model_summary.py

```diff
@@ -199,7 +199,7 @@ def __init__(self, model, mode: Optional[str] = None, max_depth: Optional[int] =
         if mode in ModelSummary.MODES:
             max_depth = ModelSummary.MODES[mode]
             rank_zero_deprecation(
-                f"Argument `mode` in `ModelSummary` is deprecated in v1.4"
+                "Argument `mode` in `ModelSummary` is deprecated in v1.4"
                 f" and will be removed in v1.6. Use `max_depth={max_depth}` to replicate `mode={mode}` behaviour."
             )
         else:
@@ -459,7 +459,7 @@ def summarize(
     if mode in ModelSummary.MODES:
         max_depth = ModelSummary.MODES[mode]
         rank_zero_deprecation(
-            f"Argument `mode` in `LightningModule.summarize` is deprecated in v1.4"
+            "Argument `mode` in `LightningModule.summarize` is deprecated in v1.4"
             f" and will be removed in v1.6. Use `max_depth={max_depth}` to replicate `mode={mode}` behavior."
         )
     model_summary = ModelSummary(lightning_module, max_depth=max_depth)
```
5 changes: 3 additions & 2 deletions pytorch_lightning/utilities/upgrade_checkpoint.py

```diff
@@ -48,8 +48,9 @@ def upgrade_checkpoint(filepath):
 if __name__ == "__main__":
 
     parser = argparse.ArgumentParser(
-        description="Upgrade an old checkpoint to the current schema. \
-        This will also save a backup of the original file."
+        description=(
+            "Upgrade an old checkpoint to the current schema. This will also save a backup of the original file."
+        )
     )
     parser.add_argument("--file", help="filepath for a checkpoint to upgrade")
 
```
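The same family of bug as in auto_gpu_select.py, but in a plain (non-raw) string: here the backslash does continue the line, yet the next line's indentation still lands inside the help text. Parenthesised concatenation avoids both problems. Roughly (indentation approximated):

```python
# Non-raw string: "\" + newline is removed, but the indentation that follows
# becomes part of the string.
old = "Upgrade an old checkpoint to the current schema. \
        This will also save a backup of the original file."
new = (
    "Upgrade an old checkpoint to the current schema."
    " This will also save a backup of the original file."
)
assert "schema.  " in old  # a run of spaces leaked into the help text
assert "schema. This" in new
```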
8 changes: 4 additions & 4 deletions tests/tuner/test_auto_gpu_select.py

```diff
@@ -32,8 +32,8 @@ def test_trainer_with_gpus_options_combination_at_available_gpus_env(auto_select
     with pytest.raises(
         expected_error,
         match=re.escape(
-            r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
-            Please select a valid number of GPU resources when using auto_select_gpus."
+            "auto_select_gpus=True, gpus=0 is not a valid configuration."
+            " Please select a valid number of GPU resources when using auto_select_gpus."
         ),
     ):
         Trainer(auto_select_gpus=auto_select_gpus, gpus=gpus)
@@ -51,8 +51,8 @@ def test_pick_multiple_gpus(nb, expected_gpu_idxs, expected_error):
     with pytest.raises(
         expected_error,
         match=re.escape(
-            r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
-            Please select a valid number of GPU resources when using auto_select_gpus."
+            "auto_select_gpus=True, gpus=0 is not a valid configuration."
+            " Please select a valid number of GPU resources when using auto_select_gpus."
         ),
     ):
         pick_multiple_gpus(nb)
```
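The tests assert on the exact message, so they change in lockstep with the source. `pytest.raises(match=...)` treats its argument as a regular expression searched against the exception text, and `re.escape` keeps characters such as `.` literal. A self-contained sketch (exception and message are illustrative):

```python
import re

import pytest


def misconfigure():
    raise ValueError("auto_select_gpus=True, gpus=0 is not a valid configuration.")


# match= is regex-searched against str(exception); re.escape stops "." from
# acting as a wildcard and matching more than intended.
with pytest.raises(ValueError, match=re.escape("gpus=0 is not a valid configuration.")):
    misconfigure()
```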
