
Use ruff for our python lints #1378

Merged: 9 commits, Feb 22, 2023
Changes from all commits
34 changes: 0 additions & 34 deletions .flake8

This file was deleted.

4 changes: 4 additions & 0 deletions .github/workflows/python.yml
@@ -55,6 +55,10 @@ jobs:
run: |
just py-lint
- name: Check requirements
run: |
just py-requirements
# ---------------------------------------------------------------------------

matrix-setup:
1 change: 1 addition & 0 deletions .vscode/extensions.json
@@ -2,6 +2,7 @@
// See https://go.microsoft.com/fwlink/?LinkId=827846
// for the documentation about the extensions.json format
"recommendations": [
"charliermarsh.ruff", // Ruff for linting
Member review comment on the line above: niiiice
"ms-python.python",
"ms-vsliveshare.vsliveshare", // Live Share
"polymeilex.wgsl",
4 changes: 4 additions & 0 deletions .vscode/settings.json
@@ -63,4 +63,8 @@
"python.analysis.extraPaths": [
"rerun_py/rerun_sdk"
],
"ruff.args": [
"--config",
"rerun_py/pyproject.toml"
],
}
3 changes: 1 addition & 2 deletions examples/python/api_demo/main.py
@@ -14,13 +14,12 @@
import math

import numpy as np
import rerun as rr
from rerun.log.annotation import AnnotationInfo
from rerun.log.rects import RectFormat
from rerun.log.text import LoggingHandler, LogLevel
from scipy.spatial.transform import Rotation

import rerun as rr


def run_segmentation() -> None:
rr.set_time_seconds("sim_time", 1)
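Most of the example-script changes below follow the same pattern as the api_demo diff above: ruff's isort rules now fold `import rerun as rr` into the alphabetized third-party import block instead of leaving it in its own trailing group. A minimal sketch of the ordering being enforced, assembled from modules that appear in the diff above rather than taken verbatim from any single file:

```python
# Standard-library imports come first.
import math

# Third-party imports follow in one alphabetized block; `rerun` now sorts in
# with the rest instead of sitting in a separate group after a blank line.
import numpy as np
import rerun as rr
from scipy.spatial.transform import Rotation
```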
1 change: 0 additions & 1 deletion examples/python/car/main.py
@@ -8,7 +8,6 @@
import cv2
import numpy as np
import numpy.typing as npt

import rerun as rr


1 change: 0 additions & 1 deletion examples/python/clock/main.py
@@ -11,7 +11,6 @@
from typing import Final, Tuple

import numpy as np

import rerun as rr

LENGTH_S: Final = 20.0
4 changes: 1 addition & 3 deletions examples/python/colmap/main.py
@@ -12,11 +12,10 @@
import numpy as np
import numpy.typing as npt
import requests
import rerun as rr
from read_write_model import Camera, read_model
from tqdm import tqdm

import rerun as rr

DATASET_DIR: Final = Path(os.path.dirname(__file__)) / "dataset"
DATASET_URL_BASE: Final = "https://storage.googleapis.com/rerun-example-datasets/colmap"
# When dataset filtering is turned on, drop views with less than this many valid points.
@@ -55,7 +54,6 @@ def intrinsics_for_camera(camera: Camera) -> npt.NDArray[Any]:


def get_downloaded_dataset_path(dataset_name: str) -> Path:

dataset_url = f"{DATASET_URL_BASE}/{dataset_name}.zip"

recording_dir = DATASET_DIR / dataset_name
3 changes: 1 addition & 2 deletions examples/python/deep_sdf/main.py
@@ -37,14 +37,13 @@
import mesh_to_sdf
import numpy as np
import numpy.typing as npt
import rerun as rr
import trimesh
from download_dataset import AVAILABLE_MESHES, ensure_mesh_downloaded
from rerun.log.file import MeshFormat
from rerun.log.text import LogLevel
from trimesh import Trimesh

import rerun as rr

CACHE_DIR = Path(os.path.dirname(__file__)) / "cache"


1 change: 0 additions & 1 deletion examples/python/dicom/main.py
@@ -20,7 +20,6 @@
import numpy.typing as npt
import pydicom as dicom
import requests

import rerun as rr

DATASET_DIR: Final = Path(os.path.dirname(__file__)) / "dataset"
3 changes: 1 addition & 2 deletions examples/python/dna/main.py
@@ -9,12 +9,11 @@
from math import tau

import numpy as np
import rerun as rr
from rerun_demo.data import build_color_spiral
from rerun_demo.util import bounce_lerp, interleave
from scipy.spatial.transform import Rotation

import rerun as rr

rr.init("DNA Abacus")

rr.spawn()
1 change: 0 additions & 1 deletion examples/python/minimal/main.py
@@ -3,7 +3,6 @@
"""Demonstrates the most barebone usage of the Rerun SDK."""

import numpy as np

import rerun as rr

rr.spawn()
4 changes: 1 addition & 3 deletions examples/python/mp_pose/main.py
@@ -13,9 +13,8 @@
import numpy as np
import numpy.typing as npt
import requests
from rerun.log.annotation import AnnotationInfo, ClassDescription

import rerun as rr
from rerun.log.annotation import AnnotationInfo, ClassDescription

EXAMPLE_DIR: Final = Path(os.path.dirname(__file__))
DATASET_DIR: Final = EXAMPLE_DIR / "dataset" / "pose_movement"
@@ -41,7 +40,6 @@ def track_pose(video_path: str, segment: bool) -> None:

with closing(VideoSource(video_path)) as video_source, mp_pose.Pose(enable_segmentation=segment) as pose:
for bgr_frame in video_source.stream_bgr():

rgb = cv.cvtColor(bgr_frame.data, cv.COLOR_BGR2RGB)
rr.set_time_seconds("time", bgr_frame.time)
rr.set_time_sequence("frame_idx", bgr_frame.idx)
3 changes: 1 addition & 2 deletions examples/python/multithreading/main.py
@@ -8,9 +8,8 @@

import numpy as np
import numpy.typing as npt
from rerun.log.rects import RectFormat

import rerun as rr
from rerun.log.rects import RectFormat


def rect_logger(path: str, color: npt.NDArray[np.float32]) -> None:
3 changes: 1 addition & 2 deletions examples/python/nyud/main.py
@@ -16,9 +16,8 @@
import numpy as np
import numpy.typing as npt
import requests
from tqdm import tqdm

import rerun as rr
from tqdm import tqdm

DEPTH_IMAGE_SCALING: Final = 1e4
DATASET_DIR: Final = Path(os.path.dirname(__file__)) / "dataset"
4 changes: 2 additions & 2 deletions examples/python/objectron/download_dataset.py
@@ -1,7 +1,7 @@
import logging
import os
from pathlib import Path
from typing import Final, List, Optional
from typing import Final, Optional

import cv2
import requests
@@ -39,7 +39,7 @@ def ensure_downloaded(src_url: str, dst_path: Path) -> None:

def find_path_if_downloaded(recording_name: str, local_dataset_dir: Path) -> Optional[Path]:
local_recording_dir = local_dataset_dir / recording_name
paths = list(local_recording_dir.glob(f"**/{ANNOTATIONS_FILENAME}")) # type: List[Path]
paths = list(local_recording_dir.glob(f"**/{ANNOTATIONS_FILENAME}"))
if paths:
return paths[0].parent
return None
5 changes: 2 additions & 3 deletions examples/python/objectron/main.py
@@ -18,6 +18,7 @@

import numpy as np
import numpy.typing as npt
import rerun as rr
from download_dataset import (
ANNOTATIONS_FILENAME,
AVAILABLE_RECORDINGS,
@@ -38,8 +39,6 @@
from rerun.log.file import ImageFormat
from scipy.spatial.transform import Rotation as R

import rerun as rr


@dataclass
class SampleARFrame:
@@ -213,7 +212,7 @@ def log_frame_annotations(frame_times: List[float], frame_annotations: List[Fram
if len(keypoint_pos2s) == 9:
log_projected_bbox(f"world/camera/video/estimates/box-{obj_ann.object_id}", keypoint_pos2s)
else:
for (id, pos2) in zip(keypoint_ids, keypoint_pos2s):
for id, pos2 in zip(keypoint_ids, keypoint_pos2s):
rr.log_point(
f"world/camera/video/estimates/box-{obj_ann.object_id}/{id}",
pos2,
1 change: 0 additions & 1 deletion examples/python/plots/main.py
@@ -15,7 +15,6 @@
from math import cos, sin, tau

import numpy as np

import rerun as rr


3 changes: 1 addition & 2 deletions examples/python/raw_mesh/main.py
@@ -15,11 +15,10 @@
from typing import Optional, cast

import numpy as np
import rerun as rr
import trimesh
from download_dataset import AVAILABLE_MESHES, ensure_mesh_downloaded

import rerun as rr


def load_scene(path: Path) -> trimesh.Scene:
print(f"loading scene {path}…")
1 change: 1 addition & 0 deletions examples/python/stable_diffusion/huggingface_pipeline.py
@@ -61,6 +61,7 @@
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
logger.addHandler(rr.log.text.LoggingHandler("logs"))


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
def preprocess(image):
if isinstance(image, torch.Tensor):
3 changes: 1 addition & 2 deletions examples/python/stable_diffusion/main.py
@@ -14,12 +14,11 @@
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

import requests
import rerun as rr
import torch
from huggingface_pipeline import StableDiffusionDepth2ImgPipeline
from PIL import Image

import rerun as rr

EXAMPLE_DIR: Final = Path(os.path.dirname(__file__))
DATASET_DIR: Final = EXAMPLE_DIR / "dataset"
CACHE_DIR: Final = EXAMPLE_DIR / "cache"
3 changes: 1 addition & 2 deletions examples/python/tracking_hf_opencv/main.py
@@ -12,9 +12,8 @@
import numpy as np
import numpy.typing as npt
import requests
from PIL import Image

import rerun as rr
from PIL import Image

EXAMPLE_DIR: Final = Path(os.path.dirname(__file__))
DATASET_DIR: Final = EXAMPLE_DIR / "dataset" / "tracking_sequences"
7 changes: 3 additions & 4 deletions justfile
@@ -49,20 +49,19 @@ py-build:
py-format:
black --config rerun_py/pyproject.toml {{py_folders}}
blackdoc {{py_folders}}
isort {{py_folders}}
pyupgrade --py37-plus `find rerun_py/rerun/ -name "*.py" -type f`
ruff --fix --config rerun_py/pyproject.toml {{py_folders}}

# Check that all the requirements.txt files for all the examples are correct
py-requirements:
find examples/python/ -name main.py | xargs -I _ sh -c 'cd $(dirname _) && echo $(pwd) && pip-missing-reqs . || exit 255'

# Run linting
py-lint: py-requirements
py-lint:
black --check --config rerun_py/pyproject.toml --diff {{py_folders}}
blackdoc --check {{py_folders}}
isort --check {{py_folders}}
ruff check --config rerun_py/pyproject.toml {{py_folders}}
mypy --no-warn-unused-ignore
flake8 {{py_folders}}

# Run fast unittests
py-test:
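With the justfile change above, `just py-format` now runs `ruff --fix --config rerun_py/pyproject.toml` in place of the separate isort and pyupgrade invocations, and `just py-lint` runs `ruff check` in place of `isort --check` and `flake8`. As a rough, hypothetical illustration (not a file from this PR), the kind of issues those separate tools used to report are now caught by the single ruff call:

```python
# hypothetical_module.py -- illustrative only, not part of the repository.
import os            # F401: `os` imported but unused (formerly flake8's territory)
import numpy as np
import math          # I001: un-sorted import block; the stdlib `math` import
                     # belongs above the third-party ones (formerly isort's job)


def scale(values: np.ndarray) -> np.ndarray:
    """Return the input scaled by two."""
    return values * 2.0
```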
2 changes: 0 additions & 2 deletions rerun_py/docs/gen_common_index.py
@@ -148,7 +148,6 @@ def make_slug(s: str) -> str:


with mkdocs_gen_files.open(index_path, "w") as index_file:

# TODO(#1161): add links to our high-level docs!

# Hide the TOC for the index since it's identical to the left nav-bar
@@ -175,7 +174,6 @@ def make_slug(s: str) -> str:
)

for section in SECTION_TABLE:

# Turn the heading into a slug and add it to the nav
md_name = make_slug(section.title)
md_file = md_name + ".md"
47 changes: 47 additions & 0 deletions rerun_py/pyproject.toml
@@ -39,6 +39,53 @@ rerun = "rerun.__main__:main"
line-length = 120
target-version = ["py38"]

[tool.ruff]
# https://beta.ruff.rs/docs/configuration/

extend-exclude = [
# Automatically generated test artifacts
"venv/",
"target/",

# generated
"examples/python/objectron/proto/objectron/proto.py",

# Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
"examples/python/stable_diffusion/huggingface_pipeline.py",

# Copied from https://github.com/colmap/colmap/blob/bf3e19140f491c3042bfd85b7192ef7d249808ec/scripts/python/read_write_model.py
"examples/python/colmap/read_write_model.py",
]
ignore = [
# Missing docstring in public function - TODO(emilk): enable for SDK but not for examples
"D1",

# No blank lines allowed after function docstring.
"D202",

# npydocstyle: http://www.pydocstyle.org/en/stable/error_codes.html
# numpy convention with a few additional lints
"D107",
"D203",
"D212",
"D401",
"D402",
"D415",
"D416",
]
line-length = 120
select = [
"D", # pydocstyle codes https://www.pydocstyle.org/en/latest/error_codes.html
"E", # pycodestyle error codes: https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
"F", # Flake8 error codes https://flake8.pycqa.org/en/latest/user/error-codes.html
"I", # Isort
"TID", # flake8-tidy-imports
"W", # pycodestyle warning codes: https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
]

[tool.ruff.flake8-tidy-imports]
ban-relative-imports = "all"

[tool.maturin]
# We use a python package from inside the rerun_sdk folder to avoid conflicting
# with the other `rerun` pypi package. The rerun_sdk.pth adds this to the pythonpath
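The `[tool.ruff.flake8-tidy-imports]` table above sets `ban-relative-imports = "all"`, which makes ruff flag every relative import (rule TID252) in favor of the absolute form. A small illustrative sketch, reusing the `rerun_demo.util` module seen in the dna example earlier; the snippet itself is not part of the PR:

```python
# Rejected under ban-relative-imports = "all" -- ruff reports TID252:
# from .util import bounce_lerp

# Accepted: spell out the absolute import (assumes the rerun_demo package
# from the examples is importable).
from rerun_demo.util import bounce_lerp
```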