Remove unnecessary use of comprehension (#8149)
Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
deepsource-autofix[bot] authored Jun 27, 2021
1 parent 3526d93 commit e11fe19
Showing 10 changed files with 14 additions and 14 deletions.
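
All ten files follow the same pattern: a comprehension that merely copies its input is replaced by the equivalent built-in constructor (list, dict, or set). A minimal standalone sketch of the pattern, not taken from the repository:

items = [3, 1, 2]
pairs = [("a", 1), ("b", 2)]

# Before: comprehensions that only copy their input
as_list = [x for x in items]
as_dict = {k: v for k, v in pairs}
as_set = set([x for x in items])

# After: the built-in constructors express the same thing directly
assert as_list == list(items)
assert as_dict == dict(pairs)
assert as_set == set(items)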
2 changes: 1 addition & 1 deletion pytorch_lightning/loggers/base.py
@@ -366,7 +366,7 @@ def __init__(self, logger_iterable: Iterable[LightningLoggerBase]):
self._logger_iterable = logger_iterable

def __getitem__(self, index: int) -> LightningLoggerBase:
- return [logger for logger in self._logger_iterable][index]
+ return list(self._logger_iterable)[index]

def after_save_checkpoint(self, checkpoint_callback: 'ReferenceType[ModelCheckpoint]') -> None:
for logger in self._logger_iterable:
2 changes: 1 addition & 1 deletion pytorch_lightning/trainer/connectors/env_vars_connector.py
@@ -31,7 +31,7 @@ def insert_env_defaults(self, *args, **kwargs):
# parse only the argument names
cls_arg_names = [arg[0] for arg in get_init_arguments_and_types(cls)]
# convert args to kwargs
- kwargs.update({k: v for k, v in zip(cls_arg_names, args)})
+ kwargs.update(dict(zip(cls_arg_names, args)))
env_variables = vars(parse_env_variables(cls))
# update the kwargs by env variables
kwargs = dict(list(env_variables.items()) + list(kwargs.items()))
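
As a side illustration (not part of this commit), a small sketch of how dict(zip(...)) maps positional arguments onto parameter names, using a hypothetical signature in place of cls.__init__:

import inspect

def init(lr, batch_size, max_epochs=10):  # hypothetical stand-in for cls.__init__
    pass

cls_arg_names = list(inspect.signature(init).parameters)  # ['lr', 'batch_size', 'max_epochs']
args = (0.01, 32)

# The new form and the removed comprehension build the same mapping
kwargs = dict(zip(cls_arg_names, args))
assert kwargs == {k: v for k, v in zip(cls_arg_names, args)}
print(kwargs)  # {'lr': 0.01, 'batch_size': 32}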
2 changes: 1 addition & 1 deletion pytorch_lightning/trainer/supporters.py
@@ -158,7 +158,7 @@ def to_disk(self) -> None:
# Switch predictions so each entry has its own dict
outputs = []
for values in zip(*predictions.values()):
- output_element = {k: v for k, v in zip(predictions.keys(), values)}
+ output_element = dict(zip(predictions.keys(), values))
outputs.append(output_element)

# Write predictions for current file to disk
6 changes: 3 additions & 3 deletions tests/callbacks/test_lr_monitor.py
@@ -288,7 +288,7 @@ def test_lr_monitor_custom_pg_name(tmpdir):
class TestModel(BoringModel):

def configure_optimizers(self):
- optimizer = torch.optim.SGD([{'params': [p for p in self.layer.parameters()], 'name': 'linear'}], lr=0.1)
+ optimizer = torch.optim.SGD([{'params': list(self.layer.parameters()), 'name': 'linear'}], lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]

@@ -325,11 +325,11 @@ def forward(self, x):
def configure_optimizers(self):
param_groups = [
{
- 'params': [p for p in self.linear_a.parameters()],
+ 'params': list(self.linear_a.parameters()),
'name': 'linear'
},
{
- 'params': [p for p in self.linear_b.parameters()],
+ 'params': list(self.linear_b.parameters()),
'name': 'linear'
},
]
2 changes: 1 addition & 1 deletion tests/checkpointing/test_model_checkpoint.py
@@ -340,7 +340,7 @@ def test_model_checkpoint_to_yaml(tmpdir, save_top_k: int):
path_yaml = os.path.join(tmpdir, 'best_k_models.yaml')
checkpoint.to_yaml(path_yaml)
d = yaml.full_load(open(path_yaml, 'r'))
- best_k = {k: v for k, v in checkpoint.best_k_models.items()}
+ best_k = dict(checkpoint.best_k_models.items())
assert d == best_k


2 changes: 1 addition & 1 deletion tests/models/test_horovod.py
@@ -264,7 +264,7 @@ def test_horovod_multi_optimizer(tmpdir):
assert hasattr(optimizer, 'synchronize'), 'optimizer has not been wrapped into DistributedOptimizer'

def get_model_params(model):
- return set([p for p in model.parameters()])
+ return set(list(model.parameters()))

def get_optimizer_params(optimizer):
return set([p for group in optimizer.param_groups for p in group.get('params', [])])
2 changes: 1 addition & 1 deletion tests/overrides/test_distributed.py
@@ -32,7 +32,7 @@ def test_unrepeated_distributed_sampler(shuffle, tmpdir):
for rank in range(world_size):
samplers.append(UnrepeatedDistributedSampler(dataset, rank=rank, num_replicas=world_size, shuffle=shuffle))

- indices = [[v for v in s] for s in samplers]
+ indices = [list(s) for s in samplers]
assert len(indices[0]) == 26
assert len(indices[1]) == 26
assert len(indices[2]) == 26
4 changes: 2 additions & 2 deletions tests/trainer/test_dataloaders.py
@@ -813,11 +813,11 @@ def test_missing_worker_init_fn():

seed_everything(0)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, shuffle=False)
- batches0 = torch.cat([batch for batch in dataloader])
+ batches0 = torch.cat(list(dataloader))

seed_everything(0)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, shuffle=False)
- batches1 = torch.cat([batch for batch in dataloader])
+ batches1 = torch.cat(list(dataloader))

is_duplicated = len(torch.unique(batches1, dim=0)) < len(dataset)
is_deterministic = torch.eq(batches0, batches1).all()
4 changes: 2 additions & 2 deletions tests/trainer/test_supporters.py
@@ -91,7 +91,7 @@ def __iter__(self):

dataset = IterDataset()
iterator = prefetch_iterator(dataset)
- assert [item for item in iterator] == [(1, False), (2, False), (3, True)]
+ assert list(iterator) == [(1, False), (2, False), (3, True)]

class EmptyIterDataset(IterableDataset):

@@ -100,7 +100,7 @@ def __iter__(self):

dataset = EmptyIterDataset()
iterator = prefetch_iterator(dataset)
- assert [item for item in iterator] == []
+ assert list(iterator) == []


@pytest.mark.parametrize(
2 changes: 1 addition & 1 deletion tests/tuner/test_auto_gpu_select.py
@@ -51,7 +51,7 @@ def test_trainer_with_gpus_options_combination_at_available_gpus_env(auto_select
["nb", "expected_gpu_idxs", "expected_error"],
[
(0, [], MisconfigurationException),
- (-1, [i for i in range(torch.cuda.device_count())], None),
+ (-1, list(range(torch.cuda.device_count())), None),
(1, [0], None),
],
)
