pre-commit: pyupgrade (#658)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Borda and pre-commit-ci[bot] authored Aug 15, 2021
1 parent 5f11ebc commit 9061d4b
Showing 20 changed files with 45 additions and 43 deletions.
7 changes: 7 additions & 0 deletions .pre-commit-config.yaml
@@ -34,6 +34,13 @@ repos:
       - id: check-added-large-files
       - id: detect-private-key
 
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v2.23.0
+    hooks:
+      - id: pyupgrade
+        args: [--py36-plus]
+        name: Upgrade code
+
   - repo: https://github.com/PyCQA/isort
     rev: 5.9.3
     hooks:
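For context: pyupgrade rewrites staged files in place whenever it finds syntax with a more modern equivalent, and the --py36-plus flag permits rewrites that require Python 3.6+. A minimal illustrative sketch (not taken from this commit) of the kind of change the hook makes:

# Illustrative only: the kind of rewrite `pyupgrade --py36-plus` performs.
name = "world"

# Before: percent-formatting, valid but dated.
greeting_old = "hello %s" % name

# After: the f-string pyupgrade writes back (requires Python 3.6+).
greeting_new = f"hello {name}"

assert greeting_old == greeting_new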
2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -140,7 +140,7 @@ def setup(app):
 # https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule
 def _package_list_from_file(pfile):
     assert os.path.isfile(pfile)
-    with open(pfile, "r") as fp:
+    with open(pfile) as fp:
         lines = fp.readlines()
     list_pkgs = []
     for ln in lines:
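This rewrite is safe because "r" is already the default mode of the built-in open. A quick check, assuming some readable text file such as a local README.md (hypothetical path):

# open() defaults to mode="r" (text, read-only), so the explicit flag is redundant.
with open("README.md") as fp:  # equivalent to open("README.md", "r")
    first_line = fp.readline()
print(first_line)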
4 changes: 1 addition & 3 deletions flash/core/data/data_source.py
@@ -684,9 +684,7 @@ def predict_load_data(data: SampleCollection, dataset: Optional[Any] = None) ->
     def _validate(self, data):
         label_type = data._get_label_field_type(self.label_field)
         if not issubclass(label_type, self.label_cls):
-            raise ValueError(
-                "Expected field '%s' to have type %s; found %s" % (self.label_field, self.label_cls, label_type)
-            )
+            raise ValueError(f"Expected field '{self.label_field}' to have type {self.label_cls}; found {label_type}")
 
     def _get_classes(self, data):
         classes = data.classes.get(self.label_field, None)
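The %-interpolation and the f-string render identically here, since both call str() on each value; the f-string simply inlines the expressions. A small equivalence check with stand-in values (hypothetical, not Flash objects):

# Both formatting styles produce the same message text.
label_field, label_cls, label_type = "ground_truth", int, str
old = "Expected field '%s' to have type %s; found %s" % (label_field, label_cls, label_type)
new = f"Expected field '{label_field}' to have type {label_cls}; found {label_type}"
assert old == new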
4 changes: 2 additions & 2 deletions flash/core/data/transforms.py
@@ -31,7 +31,7 @@ class ApplyToKeys(nn.Sequential):
     """
 
     def __init__(self, keys: Union[str, Sequence[str]], *args):
-        super().__init__(*[convert_to_modules(arg) for arg in args])
+        super().__init__(*(convert_to_modules(arg) for arg in args))
         if isinstance(keys, str):
             keys = [keys]
         self.keys = keys
@@ -72,7 +72,7 @@ class KorniaParallelTransforms(nn.Sequential):
     """
 
     def __init__(self, *args):
-        super().__init__(*[convert_to_modules(arg) for arg in args])
+        super().__init__(*(convert_to_modules(arg) for arg in args))
 
     def forward(self, inputs: Any):
         result = list(inputs) if isinstance(inputs, Sequence) else [inputs]
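Both spellings pass the same arguments to nn.Sequential.__init__; the generator form just skips building a throwaway list before the * unpacking (which materializes a tuple either way, so the change is stylistic). A sketch with hypothetical stand-ins for convert_to_modules and the receiving call:

def convert_to_modules(arg):  # stand-in for the real Flash helper
    return arg

def init(*args):  # stand-in for nn.Sequential.__init__
    return args

transforms = ["resize", "normalize"]
from_list = init(*[convert_to_modules(t) for t in transforms])  # intermediate list
from_gen = init(*(convert_to_modules(t) for t in transforms))   # no intermediate list
assert from_list == from_gen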
6 changes: 3 additions & 3 deletions flash/core/serve/component.py
@@ -94,12 +94,12 @@ def _validate_model_args(
         raise ValueError(f"Iterable args={args} must have length >= 1")
 
     if isinstance(args, (list, tuple)):
-        if not all((isinstance(x, _Servable_t) for x in args)):
+        if not all(isinstance(x, _Servable_t) for x in args):
             raise TypeError(f"One of arg in args={args} is not type {_Servable_t}")
     elif isinstance(args, dict):
-        if not all((isinstance(x, str) for x in args.keys())):
+        if not all(isinstance(x, str) for x in args.keys()):
             raise TypeError(f"One of keys in args={args.keys()} is not type {str}")
-        if not all((isinstance(x, _Servable_t) for x in args.values())):
+        if not all(isinstance(x, _Servable_t) for x in args.values()):
             raise TypeError(f"One of values in args={args} is not type {_Servable_t}")
     elif not isinstance(args, _Servable_t):
         raise TypeError(f"Args must be instance, list/tuple, or mapping of {_Servable_t}")
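all() and any() accept any iterable, so a generator expression that is the sole argument needs no extra parentheses; the doubled parentheses removed here were purely cosmetic. A quick demonstration:

values = [1, "two", 3]
# The inner parentheses are only required when the call takes more than one
# argument; as the sole argument, the generator can be written bare.
assert all((isinstance(x, int) for x in values)) == all(isinstance(x, int) for x in values)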
2 changes: 1 addition & 1 deletion flash/core/serve/dag/rewrite.py
@@ -189,7 +189,7 @@ def _apply(self, sub_dict):
         return term
 
     def __str__(self):
-        return "RewriteRule({0}, {1}, {2})".format(self.lhs, self.rhs, self.vars)
+        return f"RewriteRule({self.lhs}, {self.rhs}, {self.vars})"
 
     def __repr__(self):
         return str(self)
9 changes: 3 additions & 6 deletions flash/core/serve/dag/task.py
@@ -41,12 +41,10 @@ def preorder_traversal(task):
 
     for item in task:
         if istask(item):
-            for i in preorder_traversal(item):
-                yield i
+            yield from preorder_traversal(item)
         elif isinstance(item, list):
             yield list
-            for i in preorder_traversal(item):
-                yield i
+            yield from preorder_traversal(item)
         else:
             yield item
 
@@ -222,8 +220,7 @@ def flatten(seq, container=list):
     else:
         for item in seq:
             if isinstance(item, container):
-                for item2 in flatten(item, container=container):
-                    yield item2
+                yield from flatten(item, container=container)
             else:
                 yield item
 
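yield from (PEP 380, Python 3.3+) delegates to a sub-generator and yields everything it produces, replacing the manual loop. For plain re-yielding the two spellings are equivalent, as this self-contained sketch of the flatten pattern shows:

def flatten_loop(seq):
    for item in seq:
        if isinstance(item, list):
            for sub in flatten_loop(item):  # pre-PEP 380 spelling
                yield sub
        else:
            yield item

def flatten_delegate(seq):
    for item in seq:
        if isinstance(item, list):
            yield from flatten_delegate(item)  # delegate to the sub-generator
        else:
            yield item

nested = [1, [2, [3, 4]], 5]
assert list(flatten_loop(nested)) == list(flatten_delegate(nested)) == [1, 2, 3, 4, 5]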
2 changes: 1 addition & 1 deletion flash/core/serve/types/label.py
@@ -32,7 +32,7 @@ def __post_init__(self):
                     "Must provide either classes as a list or " "path to a text file that contains classes"
                 )
             with Path(self.path).open(mode="r") as f:
-                self.classes = tuple([item.strip() for item in f.readlines()])
+                self.classes = tuple(item.strip() for item in f.readlines())
         if isinstance(self.classes, dict):
             self._reverse_map = {}
             for key, value in self.classes.items():
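tuple() consumes any iterable, so wrapping the comprehension in a list first only allocates an intermediate that is immediately discarded. A small check with stand-in lines:

lines = ["cat\n", " dog \n", "bird\n"]  # stand-in for f.readlines()
via_list = tuple([item.strip() for item in lines])  # builds a list, then a tuple
via_gen = tuple(item.strip() for item in lines)     # builds the tuple directly
assert via_list == via_gen == ("cat", "dog", "bird")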
4 changes: 2 additions & 2 deletions flash/core/serve/types/repeated.py
@@ -50,7 +50,7 @@ def __post_init__(self):
     def deserialize(self, *args: Dict) -> Tuple[Tensor, ...]:
         if (self.max_len is not None) and (len(args) > self.max_len):
             raise ValueError(f"len(arg)={len(args)} > self.max_len={self.max_len}")
-        return tuple((self.dtype.deserialize(**item) for item in args))
+        return tuple(self.dtype.deserialize(**item) for item in args)
 
     def packed_deserialize(self, args):
         """Arguments are positional arguments for deserialize, unlike other datatypes."""
@@ -59,4 +59,4 @@ def packed_deserialize(self, args):
     def serialize(self, args: Sequence) -> Tuple[Any, ...]:
         if (self.max_len is not None) and (len(args) > self.max_len):
             raise ValueError(f"len(arg)={len(args)} > self.max_len={self.max_len}")
-        return tuple((self.dtype.serialize(item) for item in args))
+        return tuple(self.dtype.serialize(item) for item in args)
8 changes: 4 additions & 4 deletions flash/image/classification/backbones/resnet.py
@@ -62,7 +62,7 @@ def __init__(
         dilation: int = 1,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
-        super(BasicBlock, self).__init__()
+        super().__init__()
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         if groups != 1 or base_width != 64:
@@ -118,7 +118,7 @@ def __init__(
         dilation: int = 1,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
-        super(Bottleneck, self).__init__()
+        super().__init__()
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         width = int(planes * (base_width / 64.0)) * groups
@@ -171,7 +171,7 @@ def __init__(
         remove_first_maxpool: bool = False,
     ) -> None:
 
-        super(ResNet, self).__init__()
+        super().__init__()
 
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
@@ -320,7 +320,7 @@ def _resnet(
     model_weights = None
     if pretrained_flag:
         if "supervised" not in weights_paths:
-            raise KeyError("Supervised pretrained weights not available for {0}".format(model_name))
+            raise KeyError(f"Supervised pretrained weights not available for {model_name}")
 
         model_weights = load_state_dict_from_url(
             weights_paths["supervised"], map_location=torch.device("cpu") if device == -1 else torch.device(device)
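In Python 3, zero-argument super() resolves the class and instance from the enclosing method, so repeating the class name is redundant and one less thing to break if the class is renamed. A minimal sketch with stand-in classes rather than the ResNet blocks:

class Base:
    def __init__(self) -> None:
        self.initialized = True

class ExplicitChild(Base):
    def __init__(self) -> None:
        super(ExplicitChild, self).__init__()  # Python 2 compatible spelling

class ImplicitChild(Base):
    def __init__(self) -> None:
        super().__init__()  # Python 3: class and instance are inferred

assert ExplicitChild().initialized and ImplicitChild().initialized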
2 changes: 1 addition & 1 deletion flash/image/detection/serialization.py
@@ -87,7 +87,7 @@ def serialize(self, sample: Dict[str, Any]) -> Union[Detections, Dict[str, Any]]
             if self.threshold is not None and confidence < self.threshold:
                 continue
 
-            xmin, ymin, xmax, ymax = [c.tolist() for c in det["boxes"]]
+            xmin, ymin, xmax, ymax = (c.tolist() for c in det["boxes"])
             box = [
                 xmin / width,
                 ymin / height,
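Tuple unpacking accepts any iterable of the right length, so the generator behaves exactly like the list here; it is fully consumed by the unpacking itself. A quick check with stand-in values (the real code unpacks tensor coordinates):

# Stand-in values; in the real code each element is a tensor with .tolist().
boxes = [1.0, 2.0, 3.0, 4.0]
xmin, ymin, xmax, ymax = (float(c) for c in boxes)  # works like the list form
assert (xmin, ymin, xmax, ymax) == (1.0, 2.0, 3.0, 4.0)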
2 changes: 1 addition & 1 deletion flash/image/segmentation/data.py
@@ -345,7 +345,7 @@ def from_data_source(
         if flash._IS_TESTING:
             data_fetcher.block_viz_window = True
 
-        dm = super(SemanticSegmentationData, cls).from_data_source(
+        dm = super().from_data_source(
             data_source=data_source,
             train_data=train_data,
             val_data=val_data,
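Zero-argument super() works inside classmethods too; it binds to cls rather than an instance. A tiny sketch with hypothetical classes:

class Parent:
    @classmethod
    def create(cls):
        return cls()

class Child(Parent):
    @classmethod
    def create(cls):
        return super().create()  # equivalent to super(Child, cls).create()

assert isinstance(Child.create(), Child)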
2 changes: 1 addition & 1 deletion flash/pointcloud/detection/open3d_ml/data_sources.py
@@ -55,7 +55,7 @@ def load_meta(self, root_dir, dataset: Optional[BaseAutoDataset]):
         if not exists(meta_file):
             raise MisconfigurationException(f"The {root_dir} should contain a `meta.yaml` file about the classes.")
 
-        with open(meta_file, "r") as f:
+        with open(meta_file) as f:
             self.meta = yaml.safe_load(f)
 
         if "label_to_names" not in self.meta:
6 changes: 3 additions & 3 deletions flash/pointcloud/segmentation/open3d_ml/sequences_dataset.py
@@ -77,13 +77,13 @@ def load_meta(self, root_dir):
                 f"The {root_dir} should contain a `meta.yaml` file about the pointcloud sequences."
             )
 
-        with open(meta_file, "r") as f:
+        with open(meta_file) as f:
             self.meta = yaml.safe_load(f)
 
         self.label_to_names = self.get_label_to_names()
         self.num_classes = len(self.label_to_names)
 
-        with open(meta_file, "r") as f:
+        with open(meta_file) as f:
             self.meta = yaml.safe_load(f)
 
         remap_dict_val = self.meta["learning_map"]
@@ -169,7 +169,7 @@ def get_attr(self, idx):
         pc_path = self.path_list[idx]
         dir, file = split(pc_path)
         _, seq = split(split(dir)[0])
-        name = "{}_{}".format(seq, file[:-4])
+        name = f"{seq}_{file[:-4]}"
 
         pc_path = str(pc_path)
         attr = {"idx": idx, "name": name, "path": pc_path, "split": self.split}
2 changes: 1 addition & 1 deletion flash/setup_tools.py
@@ -20,7 +20,7 @@
 
 
 def _load_requirements(path_dir: str, file_name: str = "requirements.txt", comment_chars: str = "#@") -> List[str]:
-    with open(os.path.join(path_dir, file_name), "r") as file:
+    with open(os.path.join(path_dir, file_name)) as file:
         lines = [ln.strip() for ln in file.readlines()]
     reqs = []
     for ln in lines:
2 changes: 1 addition & 1 deletion flash/text/seq2seq/core/metrics.py
@@ -217,7 +217,7 @@ def aggregate(self):
         # Percentiles are returned as (interval, measure).
         percentiles = self._bootstrap_resample(score_matrix)
         # Extract the three intervals (low, mid, high).
-        intervals = tuple((Score(*percentiles[j, :]) for j in range(3)))
+        intervals = tuple(Score(*percentiles[j, :]) for j in range(3))
         result[score_type] = AggregateScore(low=intervals[0], mid=intervals[1], high=intervals[2])
         return result
2 changes: 1 addition & 1 deletion flash/video/classification/data.py
@@ -54,7 +54,7 @@
 _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]
 
 
-class BaseVideoClassification(object):
+class BaseVideoClassification:
     def __init__(
         self,
         clip_sampler: "ClipSampler",
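Every Python 3 class is new-style, so inheriting from object explicitly is a Python 2 leftover with no effect. A quick check:

class WithObject(object):  # Python 2 needed this for a new-style class
    pass

class Bare:  # identical in Python 3
    pass

assert WithObject.__bases__ == Bare.__bases__ == (object,)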
18 changes: 9 additions & 9 deletions tests/core/serve/test_dag/test_order.py
@@ -20,14 +20,14 @@ def f(*args):
 
 def test_ordering_keeps_groups_together(abcde):
     a, b, c, d, e = abcde
-    d = dict(((a, i), (f,)) for i in range(4))
+    d = {(a, i): (f,) for i in range(4)}
     d.update({(b, 0): (f, (a, 0), (a, 1)), (b, 1): (f, (a, 2), (a, 3))})
     o = order(d)
 
     assert abs(o[(a, 0)] - o[(a, 1)]) == 1
     assert abs(o[(a, 2)] - o[(a, 3)]) == 1
 
-    d = dict(((a, i), (f,)) for i in range(4))
+    d = {(a, i): (f,) for i in range(4)}
     d.update({(b, 0): (f, (a, 0), (a, 2)), (b, 1): (f, (a, 1), (a, 3))})
     o = order(d)
 
@@ -220,7 +220,7 @@ def test_prefer_deep(abcde):
 
 
 def test_stacklimit(abcde):
-    dsk = dict(("x%s" % (i + 1), (inc, "x%s" % i)) for i in range(10000))
+    dsk = {"x%s" % (i + 1): (inc, "x%s" % i) for i in range(10000)}
     dependencies, dependents = get_deps(dsk)
     ndependencies(dependencies, dependents)
 
@@ -280,7 +280,7 @@ def test_run_smaller_sections(abcde):
     Prefer to run acb first because then we can get that out of the way
     """
     a, b, c, d, e = abcde
-    aa, bb, cc, dd = [x * 2 for x in [a, b, c, d]]
+    aa, bb, cc, dd = (x * 2 for x in [a, b, c, d])
 
     expected = [a, c, b, e, d, cc, bb, aa, dd]
 
@@ -325,9 +325,9 @@ def test_local_parents_of_reduction(abcde):
     Prefer to finish a1 stack before proceeding to b2
     """
     a, b, c, d, e = abcde
-    a1, a2, a3 = [a + i for i in "123"]
-    b1, b2, b3 = [b + i for i in "123"]
-    c1, c2, c3 = [c + i for i in "123"]
+    a1, a2, a3 = (a + i for i in "123")
+    b1, b2, b3 = (b + i for i in "123")
+    c1, c2, c3 = (c + i for i in "123")
 
     expected = [a3, a2, a1, b3, b2, b1, c3, c2, c1]
 
@@ -368,8 +368,8 @@ def test_nearest_neighbor(abcde):
     This is difficult because all groups are connected.
     """
     a, b, c, _, _ = abcde
-    a1, a2, a3, a4, a5, a6, a7, a8, a9 = [a + i for i in "123456789"]
-    b1, b2, b3, b4 = [b + i for i in "1234"]
+    a1, a2, a3, a4, a5, a6, a7, a8, a9 = (a + i for i in "123456789")
+    b1, b2, b3, b4 = (b + i for i in "1234")
 
     dsk = {
         b1: (f,),
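dict(iterable of pairs) and a dict comprehension build the same mapping; the comprehension avoids packing each key/value into a tuple and reads like a literal. A self-contained check mirroring the test with stand-ins:

def f(*args):  # stand-in for the test helper of the same name
    return None

a = "a"
via_dict = dict(((a, i), (f,)) for i in range(4))  # pairs packed into tuples
via_comp = {(a, i): (f,) for i in range(4)}        # direct key: value syntax
assert via_dict == via_comp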
2 changes: 1 addition & 1 deletion tests/examples/utils.py
@@ -21,7 +21,7 @@ def call_script(
     args: Optional[List[str]] = None,
     timeout: Optional[int] = 60 * 10,
 ) -> Tuple[int, str, str]:
-    with open(filepath, "r") as original:
+    with open(filepath) as original:
         data = original.read()
 
     with open(filepath, "w") as modified:
2 changes: 1 addition & 1 deletion tests/image/test_backbones.py
@@ -67,7 +67,7 @@ def test_pretrained_weights_registry(backbone, pretrained, expected_num_features
     ],
 )
 def test_wide_resnets(backbone, pretrained):
-    with pytest.raises(KeyError, match="Supervised pretrained weights not available for {0}".format(backbone)):
+    with pytest.raises(KeyError, match=f"Supervised pretrained weights not available for {backbone}"):
         IMAGE_CLASSIFIER_BACKBONES.get(backbone)(pretrained=pretrained)
 
