diff --git a/.mypy.ini b/.mypy.ini index 981fc94789dc..396287173905 100644 --- a/.mypy.ini +++ b/.mypy.ini @@ -1,5 +1,5 @@ [mypy] -files = rerun_py/rerun, rerun_py/tests, examples +files = rerun_py/rerun_sdk/rerun, rerun_py/tests, examples exclude = examples/objectron/dataset/proto namespace_packages = True show_error_codes = True diff --git a/rerun_py/Cargo.toml b/rerun_py/Cargo.toml index e3a94a75729b..7f8348e9d305 100644 --- a/rerun_py/Cargo.toml +++ b/rerun_py/Cargo.toml @@ -65,4 +65,4 @@ pyo3-build-config = "0.18.0" toml = "0.7" [package.metadata.maturin] -name = "rerun.rerun_bindings" +name = "rerun_bindings" diff --git a/rerun_py/docs/gen_common_index.py b/rerun_py/docs/gen_common_index.py index 79608c2ca260..d46b52862336 100644 --- a/rerun_py/docs/gen_common_index.py +++ b/rerun_py/docs/gen_common_index.py @@ -68,7 +68,7 @@ ] # Virual folder where we will generate the md files -root = Path(__file__).parent.parent.resolve() +root = Path(__file__).parent.parent.joinpath("rerun_sdk").resolve() common_dir = Path("common") # We use griffe to access docstrings diff --git a/rerun_py/docs/gen_package_index.py b/rerun_py/docs/gen_package_index.py index 91a076bba3b1..8ebd35488fed 100644 --- a/rerun_py/docs/gen_package_index.py +++ b/rerun_py/docs/gen_package_index.py @@ -35,7 +35,7 @@ import mkdocs_gen_files -root = Path(__file__).parent.parent.resolve() +root = Path(__file__).parent.parent.joinpath("rerun_sdk").resolve() package_dir = Path("package") nav = mkdocs_gen_files.Nav() diff --git a/rerun_py/mkdocs.yml b/rerun_py/mkdocs.yml index 8ba9b9180180..5ba8db6ef500 100644 --- a/rerun_py/mkdocs.yml +++ b/rerun_py/mkdocs.yml @@ -22,7 +22,7 @@ plugins: custom_templates: rerun_py/docs/templates # Override the function template. NOTE: relative to working directory. (https://github.com/mkdocstrings/mkdocstrings/issues/477) handlers: python: - paths: ["."] # Lookup python modules relative to this path + paths: ["rerun_sdk"] # Lookup python modules relative to this path import: # Cross-references for python and numpy - https://docs.python.org/3/objects.inv - https://numpy.org/doc/stable/objects.inv diff --git a/rerun_py/pyproject.toml b/rerun_py/pyproject.toml index 15127bf433c7..f48953936d5a 100644 --- a/rerun_py/pyproject.toml +++ b/rerun_py/pyproject.toml @@ -40,4 +40,10 @@ line-length = 120 target-version = ["py38"] [tool.maturin] +# We use a python package from inside the rerun_sdk folder to avoid conflicting +# with the other `rerun` pypi package. The rerun_sdk.pth adds this to the pythonpath +# which then allows `import rerun` to work as expected. +# See https://github.com/rerun-io/rerun/pull/1085 for more details +include = ["rerun_sdk.pth"] locked = true +python-packages = ["rerun_sdk/rerun"] diff --git a/rerun_py/rerun/__init__.py b/rerun_py/rerun/__init__.py index 420a6a47a626..67e20eb2f652 100644 --- a/rerun_py/rerun/__init__.py +++ b/rerun_py/rerun/__init__.py @@ -1,349 +1,24 @@ -"""The Rerun Python SDK, which is a wrapper around the rerun_sdk crate.""" +""" +A shim necessary to make maturin dev builds work properly. -import atexit -from typing import Optional +Our maturin builds stick our package inside of a "rerun_sdk" folder +to avoid conflicting with the non-rerun "rerun" package. In released +builds, we include a rerun_sdk.pth file that makes things work properly, +but that doesn't work in dev builds where maturin generates its own +.pth file that points 1 level too high. 
-import rerun.rerun_bindings as bindings # type: ignore[attr-defined] -from rerun.log import log_cleared -from rerun.log.annotation import log_annotation_context -from rerun.log.arrow import log_arrow -from rerun.log.bounding_box import log_obb -from rerun.log.camera import log_pinhole -from rerun.log.extension_components import log_extension_components -from rerun.log.file import log_image_file, log_mesh_file -from rerun.log.image import log_depth_image, log_image, log_segmentation_image -from rerun.log.lines import log_line_segments, log_path -from rerun.log.mesh import log_mesh, log_meshes -from rerun.log.points import log_point, log_points -from rerun.log.rects import log_rect, log_rects -from rerun.log.scalar import log_scalar -from rerun.log.tensor import log_tensor -from rerun.log.text import log_text_entry -from rerun.log.transform import log_rigid3, log_unknown_transform, log_view_coordinates +When we encounter this file on import, we instead redirect to the +real rerun module by adding it to the path and then +replacing our own module content with it. +""" +import pathlib +import sys -__all__ = [ - "bindings", - "components", - "log_annotation_context", - "log_arrow", - "log_cleared", - "log_cleared", - "log_depth_image", - "log_image", - "log_image_file", - "log_line_segments", - "log_mesh", - "log_meshes", - "log_mesh_file", - "log_obb", - "log_path", - "log_pinhole", - "log_point", - "log_points", - "log_rect", - "log_rects", - "log_rigid3", - "log_scalar", - "log_segmentation_image", - "log_tensor", - "log_text_entry", - "log_unknown_transform", - "log_extension_components", - "log_view_coordinates", -] +real_path = pathlib.Path(__file__).parent.parent.joinpath("rerun_sdk").resolve() +print("DEV ENVIRONMENT DETECTED! Re-importing rerun from: {}".format(real_path), file=sys.stderr) -def rerun_shutdown() -> None: - bindings.shutdown() +sys.path.insert(0, str(real_path)) - -atexit.register(rerun_shutdown) - - -def unregister_shutdown() -> None: - atexit.unregister(rerun_shutdown) - - -# ----------------------------------------------------------------------------- - - -def get_recording_id() -> str: - """ - Get the recording ID that this process is logging to, as a UUIDv4. - - The default recording_id is based on `multiprocessing.current_process().authkey` - which means that all processes spawned with `multiprocessing` - will have the same default recording_id. - - If you are not using `multiprocessing` and still want several different Python - processes to log to the same Rerun instance (and be part of the same recording), - you will need to manually assign them all the same recording_id. - Any random UUIDv4 will work, or copy the recording id for the parent process. - - Returns - ------- - str - The recording ID that this process is logging to. - - """ - return str(bindings.get_recording_id()) - - -def set_recording_id(value: str) -> None: - """ - Set the recording ID that this process is logging to, as a UUIDv4. - - The default recording_id is based on `multiprocessing.current_process().authkey` - which means that all processes spawned with `multiprocessing` - will have the same default recording_id. - - If you are not using `multiprocessing` and still want several different Python - processes to log to the same Rerun instance (and be part of the same recording), - you will need to manually assign them all the same recording_id. - Any random UUIDv4 will work, or copy the recording id for the parent process.
- - Parameters - ---------- - value : str - The recording ID to use for this process. - - """ - bindings.set_recording_id(value) - - -def init(application_id: str, spawn_and_connect: bool = False) -> None: - """ - Initialize the Rerun SDK with a user-chosen application id (name). - - Parameters - ---------- - application_id : str - Your Rerun recordings will be categorized by this application id, so - try to pick a unique one for each application that uses the Rerun SDK. - - For instance, if you have one application doing object detection - and another doing camera calibration, you could have - `rerun.init("object_detector")` and `rerun.init("calibrator")`. - spawn_and_connect : bool - Spawn a Rerun Viewer and stream logging data to it. - Short for calling `spawn_and_connect` separately. - If you don't call this, log events will be buffered indefinitely until - you call either `connect`, `show`, or `save` - - """ - app_path = None - - # NOTE: It'd be even nicer to do such thing on the Rust-side so that this little trick would - # only need to be written once and just work for all languages out of the box... unfortunately - # we lose most of the details of the python part of the backtrace once we go over the bridge. - # - # Still, better than nothing! - try: - import inspect - import pathlib - - app_path = pathlib.Path(inspect.stack()[1][1]).resolve() - except Exception: - pass - - bindings.init(application_id, app_path) - - if spawn_and_connect: - _spawn_and_connect() - - -def spawn_and_connect(port: int = 9876) -> None: - """ - Spawn a Rerun Viewer and stream logging data to it. - - This is often the easiest and best way to use Rerun. - Just call this once at the start of your program. - - Parameters - ---------- - port : int - The port to connect to - - See Also - -------- - * [rerun.connect][] - - """ - spawn_viewer(port) - connect(f"127.0.0.1:{port}") - - -_spawn_and_connect = spawn_and_connect # we need this because Python scoping is horrible - - -def connect(addr: Optional[str] = None) -> None: - """ - Connect to a remote Rerun Viewer on the given ip:port. - - Requires that you first start a Rerun Viewer, e.g. with 'python -m rerun' - - Parameters - ---------- - addr : str - The ip:port to connect to - - """ - bindings.connect(addr) - - -def spawn_viewer(port: int = 9876) -> None: - """ - Spawn a Rerun Viewer, listening on the given port. - - Parameters - ---------- - port : int - The port to listen on. - - """ - import subprocess - import sys - from time import sleep - - # sys.executable: the absolute path of the executable binary for the Python interpreter - python_executable = sys.executable - if python_executable is None: - python_executable = "python3" - - # start_new_session=True ensures the spawned process does NOT die when - # we hit ctrl-c in the terminal running the parent Python process. - rerun_process = subprocess.Popen([python_executable, "-m", "rerun", "--port", str(port)], start_new_session=True) - print(f"Spawned Rerun Viewer with pid {rerun_process.pid}") - - # TODO(emilk): figure out a way to postpone connecting until the rerun viewer is listening. - # For instance, wait until it prints "Hosting a SDK server over TCP at …" - sleep(0.2) # almost as good as waiting the correct amount of time - - -def serve() -> None: - """ - Serve a Rerun Web Viewer. - - WARNING: This is an experimental feature. 
- """ - bindings.serve() - - -def disconnect() -> None: - """Disconnect from the remote rerun server (if any).""" - bindings.disconnect() - - -def show() -> None: - """ - Show previously logged data. - - This only works if you have not called `connect`. - - This will clear the logged data after showing it. - - NOTE: There is a bug which causes this function to only work once on some platforms. - - """ - bindings.show() - - -def save(path: str) -> None: - """ - Save previously logged data to a file. - - This only works if you have not called `connect`. - - This will clear the logged data after saving. - - Parameters - ---------- - path : str - The path to save the data to. - - """ - bindings.save(path) - - -def set_time_sequence(timeline: str, sequence: Optional[int]) -> None: - """ - Set the current time for this thread as an integer sequence. - - Used for all subsequent logging on the same thread, - until the next call to `set_time_sequence`. - - For instance: `set_time_sequence("frame_nr", frame_nr)`. - - You can remove a timeline again using `set_time_sequence("frame_nr", None)`. - - There is no requirement of monoticity. You can move the time backwards if you like. - - Parameters - ---------- - timeline : str - The name of the timeline to set the time for. - sequence : int - The current time on the timeline in integer units. - - """ - bindings.set_time_sequence(timeline, sequence) - - -def set_time_seconds(timeline: str, seconds: Optional[float]) -> None: - """ - Set the current time for this thread in seconds. - - Used for all subsequent logging on the same thread, - until the next call to `set_time_seconds`. - - For instance: `set_time_seconds("capture_time", seconds_since_unix_epoch)`. - - You can remove a timeline again using `set_time_seconds("capture_time", None)`. - - The argument should be in seconds, and should be measured either from the - unix epoch (1970-01-01), or from some recent time (e.g. your program startup). - - The bindings has a built-in time which is `log_time`, and is logged as seconds - since unix epoch. - - There is no requirement of monoticity. You can move the time backwards if you like. - - Parameters - ---------- - timeline : str - The name of the timeline to set the time for. - seconds : float - The current time on the timeline in seconds. - - """ - bindings.set_time_seconds(timeline, seconds) - - -def set_time_nanos(timeline: str, nanos: Optional[int]) -> None: - """ - Set the current time for this thread. - - Used for all subsequent logging on the same thread, - until the next call to `set_time_nanos`. - - For instance: `set_time_nanos("capture_time", nanos_since_unix_epoch)`. - - You can remove a timeline again using `set_time_nanos("capture_time", None)`. - - The argument should be in nanoseconds, and should be measured either from the - unix epoch (1970-01-01), or from some recent time (e.g. your program startup). - - The bindings has a built-in time which is `log_time`, and is logged as nanos since - unix epoch. - - There is no requirement of monoticity. You can move the time backwards if you like. - - Parameters - ---------- - timeline : str - The name of the timeline to set the time for. - nanos : int - The current time on the timeline in nanoseconds. 
- - """ - bindings.set_time_nanos(timeline, nanos) +del sys.modules["rerun"] +sys.modules["rerun"] = __import__("rerun") diff --git a/rerun_py/rerun_sdk.pth b/rerun_py/rerun_sdk.pth new file mode 100644 index 000000000000..bec31610050c --- /dev/null +++ b/rerun_py/rerun_sdk.pth @@ -0,0 +1 @@ +rerun_sdk diff --git a/rerun_py/rerun_sdk/rerun/__init__.py b/rerun_py/rerun_sdk/rerun/__init__.py new file mode 100644 index 000000000000..e006a9a63f28 --- /dev/null +++ b/rerun_py/rerun_sdk/rerun/__init__.py @@ -0,0 +1,349 @@ +"""The Rerun Python SDK, which is a wrapper around the rerun_sdk crate.""" + +import atexit +from typing import Optional + +import rerun_bindings as bindings # type: ignore[attr-defined] +from rerun.log import log_cleared +from rerun.log.annotation import log_annotation_context +from rerun.log.arrow import log_arrow +from rerun.log.bounding_box import log_obb +from rerun.log.camera import log_pinhole +from rerun.log.extension_components import log_extension_components +from rerun.log.file import log_image_file, log_mesh_file +from rerun.log.image import log_depth_image, log_image, log_segmentation_image +from rerun.log.lines import log_line_segments, log_path +from rerun.log.mesh import log_mesh, log_meshes +from rerun.log.points import log_point, log_points +from rerun.log.rects import log_rect, log_rects +from rerun.log.scalar import log_scalar +from rerun.log.tensor import log_tensor +from rerun.log.text import log_text_entry +from rerun.log.transform import log_rigid3, log_unknown_transform, log_view_coordinates + +__all__ = [ + "bindings", + "components", + "log_annotation_context", + "log_arrow", + "log_cleared", + "log_cleared", + "log_depth_image", + "log_image", + "log_image_file", + "log_line_segments", + "log_mesh", + "log_meshes", + "log_mesh_file", + "log_obb", + "log_path", + "log_pinhole", + "log_point", + "log_points", + "log_rect", + "log_rects", + "log_rigid3", + "log_scalar", + "log_segmentation_image", + "log_tensor", + "log_text_entry", + "log_unknown_transform", + "log_extension_components", + "log_view_coordinates", +] + + +def rerun_shutdown() -> None: + bindings.shutdown() + + +atexit.register(rerun_shutdown) + + +def unregister_shutdown() -> None: + atexit.unregister(rerun_shutdown) + + +# ----------------------------------------------------------------------------- + + +def get_recording_id() -> str: + """ + Get the recording ID that this process is logging to, as a UUIDv4. + + The default recording_id is based on `multiprocessing.current_process().authkey` + which means that all processes spawned with `multiprocessing` + will have the same default recording_id. + + If you are not using `multiprocessing` and still want several different Python + processes to log to the same Rerun instance (and be part of the same recording), + you will need to manually assign them all the same recording_id. + Any random UUIDv4 will work, or copy the recording id for the parent process. + + Returns + ------- + str + The recording ID that this process is logging to. + + """ + return str(bindings.get_recording_id()) + + +def set_recording_id(value: str) -> None: + """ + Set the recording ID that this process is logging to, as a UUIDv4. + + The default recording_id is based on `multiprocessing.current_process().authkey` + which means that all processes spawned with `multiprocessing` + will have the same default recording_id. 
+ + If you are not using `multiprocessing` and still want several different Python + processes to log to the same Rerun instance (and be part of the same recording), + you will need to manually assign them all the same recording_id. + Any random UUIDv4 will work, or copy the recording id for the parent process. + + Parameters + ---------- + value : str + The recording ID to use for this process. + + """ + bindings.set_recording_id(value) + + +def init(application_id: str, spawn_and_connect: bool = False) -> None: + """ + Initialize the Rerun SDK with a user-chosen application id (name). + + Parameters + ---------- + application_id : str + Your Rerun recordings will be categorized by this application id, so + try to pick a unique one for each application that uses the Rerun SDK. + + For instance, if you have one application doing object detection + and another doing camera calibration, you could have + `rerun.init("object_detector")` and `rerun.init("calibrator")`. + spawn_and_connect : bool + Spawn a Rerun Viewer and stream logging data to it. + Short for calling `spawn_and_connect` separately. + If you don't call this, log events will be buffered indefinitely until + you call either `connect`, `show`, or `save` + + """ + app_path = None + + # NOTE: It'd be even nicer to do such thing on the Rust-side so that this little trick would + # only need to be written once and just work for all languages out of the box... unfortunately + # we lose most of the details of the python part of the backtrace once we go over the bridge. + # + # Still, better than nothing! + try: + import inspect + import pathlib + + app_path = pathlib.Path(inspect.stack()[1][1]).resolve() + except Exception: + pass + + bindings.init(application_id, app_path) + + if spawn_and_connect: + _spawn_and_connect() + + +def spawn_and_connect(port: int = 9876) -> None: + """ + Spawn a Rerun Viewer and stream logging data to it. + + This is often the easiest and best way to use Rerun. + Just call this once at the start of your program. + + Parameters + ---------- + port : int + The port to connect to + + See Also + -------- + * [rerun.connect][] + + """ + spawn_viewer(port) + connect(f"127.0.0.1:{port}") + + +_spawn_and_connect = spawn_and_connect # we need this because Python scoping is horrible + + +def connect(addr: Optional[str] = None) -> None: + """ + Connect to a remote Rerun Viewer on the given ip:port. + + Requires that you first start a Rerun Viewer, e.g. with 'python -m rerun' + + Parameters + ---------- + addr : str + The ip:port to connect to + + """ + bindings.connect(addr) + + +def spawn_viewer(port: int = 9876) -> None: + """ + Spawn a Rerun Viewer, listening on the given port. + + Parameters + ---------- + port : int + The port to listen on. + + """ + import subprocess + import sys + from time import sleep + + # sys.executable: the absolute path of the executable binary for the Python interpreter + python_executable = sys.executable + if python_executable is None: + python_executable = "python3" + + # start_new_session=True ensures the spawned process does NOT die when + # we hit ctrl-c in the terminal running the parent Python process. + rerun_process = subprocess.Popen([python_executable, "-m", "rerun", "--port", str(port)], start_new_session=True) + print(f"Spawned Rerun Viewer with pid {rerun_process.pid}") + + # TODO(emilk): figure out a way to postpone connecting until the rerun viewer is listening. 
# For instance, wait until it prints "Hosting a SDK server over TCP at …" + sleep(0.2) # almost as good as waiting the correct amount of time + + +def serve() -> None: + """ + Serve a Rerun Web Viewer. + + WARNING: This is an experimental feature. + """ + bindings.serve() + + +def disconnect() -> None: + """Disconnect from the remote rerun server (if any).""" + bindings.disconnect() + + +def show() -> None: + """ + Show previously logged data. + + This only works if you have not called `connect`. + + This will clear the logged data after showing it. + + NOTE: There is a bug which causes this function to only work once on some platforms. + + """ + bindings.show() + + +def save(path: str) -> None: + """ + Save previously logged data to a file. + + This only works if you have not called `connect`. + + This will clear the logged data after saving. + + Parameters + ---------- + path : str + The path to save the data to. + + """ + bindings.save(path) + + +def set_time_sequence(timeline: str, sequence: Optional[int]) -> None: + """ + Set the current time for this thread as an integer sequence. + + Used for all subsequent logging on the same thread, + until the next call to `set_time_sequence`. + + For instance: `set_time_sequence("frame_nr", frame_nr)`. + + You can remove a timeline again using `set_time_sequence("frame_nr", None)`. + + There is no requirement of monotonicity. You can move the time backwards if you like. + + Parameters + ---------- + timeline : str + The name of the timeline to set the time for. + sequence : int + The current time on the timeline in integer units. + + """ + bindings.set_time_sequence(timeline, sequence) + + +def set_time_seconds(timeline: str, seconds: Optional[float]) -> None: + """ + Set the current time for this thread in seconds. + + Used for all subsequent logging on the same thread, + until the next call to `set_time_seconds`. + + For instance: `set_time_seconds("capture_time", seconds_since_unix_epoch)`. + + You can remove a timeline again using `set_time_seconds("capture_time", None)`. + + The argument should be in seconds, and should be measured either from the + unix epoch (1970-01-01), or from some recent time (e.g. your program startup). + + The bindings have a built-in time which is `log_time`, and is logged as seconds + since unix epoch. + + There is no requirement of monotonicity. You can move the time backwards if you like. + + Parameters + ---------- + timeline : str + The name of the timeline to set the time for. + seconds : float + The current time on the timeline in seconds. + + """ + bindings.set_time_seconds(timeline, seconds) + + +def set_time_nanos(timeline: str, nanos: Optional[int]) -> None: + """ + Set the current time for this thread. + + Used for all subsequent logging on the same thread, + until the next call to `set_time_nanos`. + + For instance: `set_time_nanos("capture_time", nanos_since_unix_epoch)`. + + You can remove a timeline again using `set_time_nanos("capture_time", None)`. + + The argument should be in nanoseconds, and should be measured either from the + unix epoch (1970-01-01), or from some recent time (e.g. your program startup). + + The bindings have a built-in time which is `log_time`, and is logged as nanos since + unix epoch. + + There is no requirement of monotonicity. You can move the time backwards if you like. + + Parameters + ---------- + timeline : str + The name of the timeline to set the time for. + nanos : int + The current time on the timeline in nanoseconds.
+ + """ + bindings.set_time_nanos(timeline, nanos) diff --git a/rerun_py/rerun/__main__.py b/rerun_py/rerun_sdk/rerun/__main__.py similarity index 64% rename from rerun_py/rerun/__main__.py rename to rerun_py/rerun_sdk/rerun/__main__.py index be1e5244a4c9..8213af58ec28 100644 --- a/rerun_py/rerun/__main__.py +++ b/rerun_py/rerun_sdk/rerun/__main__.py @@ -2,13 +2,13 @@ import sys -from rerun import rerun_bindings, unregister_shutdown # type: ignore[attr-defined] +from rerun import bindings, unregister_shutdown # type: ignore[attr-defined] def main() -> None: # We don't need to call shutdown in this case. Rust should be handling everything unregister_shutdown() - exit(rerun_bindings.main(sys.argv)) + exit(bindings.main(sys.argv)) if __name__ == "__main__": diff --git a/rerun_py/rerun/color_conversion.py b/rerun_py/rerun_sdk/rerun/color_conversion.py similarity index 100% rename from rerun_py/rerun/color_conversion.py rename to rerun_py/rerun_sdk/rerun/color_conversion.py diff --git a/rerun_py/rerun/components/__init__.py b/rerun_py/rerun_sdk/rerun/components/__init__.py similarity index 100% rename from rerun_py/rerun/components/__init__.py rename to rerun_py/rerun_sdk/rerun/components/__init__.py diff --git a/rerun_py/rerun/components/annotation.py b/rerun_py/rerun_sdk/rerun/components/annotation.py similarity index 100% rename from rerun_py/rerun/components/annotation.py rename to rerun_py/rerun_sdk/rerun/components/annotation.py diff --git a/rerun_py/rerun/components/arrow.py b/rerun_py/rerun_sdk/rerun/components/arrow.py similarity index 100% rename from rerun_py/rerun/components/arrow.py rename to rerun_py/rerun_sdk/rerun/components/arrow.py diff --git a/rerun_py/rerun/components/box.py b/rerun_py/rerun_sdk/rerun/components/box.py similarity index 100% rename from rerun_py/rerun/components/box.py rename to rerun_py/rerun_sdk/rerun/components/box.py diff --git a/rerun_py/rerun/components/color.py b/rerun_py/rerun_sdk/rerun/components/color.py similarity index 100% rename from rerun_py/rerun/components/color.py rename to rerun_py/rerun_sdk/rerun/components/color.py diff --git a/rerun_py/rerun/components/instance.py b/rerun_py/rerun_sdk/rerun/components/instance.py similarity index 100% rename from rerun_py/rerun/components/instance.py rename to rerun_py/rerun_sdk/rerun/components/instance.py diff --git a/rerun_py/rerun/components/label.py b/rerun_py/rerun_sdk/rerun/components/label.py similarity index 100% rename from rerun_py/rerun/components/label.py rename to rerun_py/rerun_sdk/rerun/components/label.py diff --git a/rerun_py/rerun/components/linestrip.py b/rerun_py/rerun_sdk/rerun/components/linestrip.py similarity index 100% rename from rerun_py/rerun/components/linestrip.py rename to rerun_py/rerun_sdk/rerun/components/linestrip.py diff --git a/rerun_py/rerun/components/point.py b/rerun_py/rerun_sdk/rerun/components/point.py similarity index 100% rename from rerun_py/rerun/components/point.py rename to rerun_py/rerun_sdk/rerun/components/point.py diff --git a/rerun_py/rerun/components/quaternion.py b/rerun_py/rerun_sdk/rerun/components/quaternion.py similarity index 100% rename from rerun_py/rerun/components/quaternion.py rename to rerun_py/rerun_sdk/rerun/components/quaternion.py diff --git a/rerun_py/rerun/components/radius.py b/rerun_py/rerun_sdk/rerun/components/radius.py similarity index 100% rename from rerun_py/rerun/components/radius.py rename to rerun_py/rerun_sdk/rerun/components/radius.py diff --git a/rerun_py/rerun/components/rect2d.py 
b/rerun_py/rerun_sdk/rerun/components/rect2d.py similarity index 100% rename from rerun_py/rerun/components/rect2d.py rename to rerun_py/rerun_sdk/rerun/components/rect2d.py diff --git a/rerun_py/rerun/components/scalar.py b/rerun_py/rerun_sdk/rerun/components/scalar.py similarity index 100% rename from rerun_py/rerun/components/scalar.py rename to rerun_py/rerun_sdk/rerun/components/scalar.py diff --git a/rerun_py/rerun/components/tensor.py b/rerun_py/rerun_sdk/rerun/components/tensor.py similarity index 100% rename from rerun_py/rerun/components/tensor.py rename to rerun_py/rerun_sdk/rerun/components/tensor.py diff --git a/rerun_py/rerun/components/text_entry.py b/rerun_py/rerun_sdk/rerun/components/text_entry.py similarity index 100% rename from rerun_py/rerun/components/text_entry.py rename to rerun_py/rerun_sdk/rerun/components/text_entry.py diff --git a/rerun_py/rerun/components/vec.py b/rerun_py/rerun_sdk/rerun/components/vec.py similarity index 100% rename from rerun_py/rerun/components/vec.py rename to rerun_py/rerun_sdk/rerun/components/vec.py diff --git a/rerun_py/rerun/log/__init__.py b/rerun_py/rerun_sdk/rerun/log/__init__.py similarity index 100% rename from rerun_py/rerun/log/__init__.py rename to rerun_py/rerun_sdk/rerun/log/__init__.py diff --git a/rerun_py/rerun/log/annotation.py b/rerun_py/rerun_sdk/rerun/log/annotation.py similarity index 100% rename from rerun_py/rerun/log/annotation.py rename to rerun_py/rerun_sdk/rerun/log/annotation.py diff --git a/rerun_py/rerun/log/arrow.py b/rerun_py/rerun_sdk/rerun/log/arrow.py similarity index 100% rename from rerun_py/rerun/log/arrow.py rename to rerun_py/rerun_sdk/rerun/log/arrow.py diff --git a/rerun_py/rerun/log/bounding_box.py b/rerun_py/rerun_sdk/rerun/log/bounding_box.py similarity index 100% rename from rerun_py/rerun/log/bounding_box.py rename to rerun_py/rerun_sdk/rerun/log/bounding_box.py diff --git a/rerun_py/rerun/log/camera.py b/rerun_py/rerun_sdk/rerun/log/camera.py similarity index 100% rename from rerun_py/rerun/log/camera.py rename to rerun_py/rerun_sdk/rerun/log/camera.py diff --git a/rerun_py/rerun/log/error_utils.py b/rerun_py/rerun_sdk/rerun/log/error_utils.py similarity index 100% rename from rerun_py/rerun/log/error_utils.py rename to rerun_py/rerun_sdk/rerun/log/error_utils.py diff --git a/rerun_py/rerun/log/extension_components.py b/rerun_py/rerun_sdk/rerun/log/extension_components.py similarity index 100% rename from rerun_py/rerun/log/extension_components.py rename to rerun_py/rerun_sdk/rerun/log/extension_components.py diff --git a/rerun_py/rerun/log/file.py b/rerun_py/rerun_sdk/rerun/log/file.py similarity index 100% rename from rerun_py/rerun/log/file.py rename to rerun_py/rerun_sdk/rerun/log/file.py diff --git a/rerun_py/rerun/log/image.py b/rerun_py/rerun_sdk/rerun/log/image.py similarity index 100% rename from rerun_py/rerun/log/image.py rename to rerun_py/rerun_sdk/rerun/log/image.py diff --git a/rerun_py/rerun/log/lines.py b/rerun_py/rerun_sdk/rerun/log/lines.py similarity index 100% rename from rerun_py/rerun/log/lines.py rename to rerun_py/rerun_sdk/rerun/log/lines.py diff --git a/rerun_py/rerun/log/mesh.py b/rerun_py/rerun_sdk/rerun/log/mesh.py similarity index 100% rename from rerun_py/rerun/log/mesh.py rename to rerun_py/rerun_sdk/rerun/log/mesh.py diff --git a/rerun_py/rerun/log/points.py b/rerun_py/rerun_sdk/rerun/log/points.py similarity index 100% rename from rerun_py/rerun/log/points.py rename to rerun_py/rerun_sdk/rerun/log/points.py diff --git a/rerun_py/rerun/log/rects.py 
b/rerun_py/rerun_sdk/rerun/log/rects.py similarity index 100% rename from rerun_py/rerun/log/rects.py rename to rerun_py/rerun_sdk/rerun/log/rects.py diff --git a/rerun_py/rerun/log/scalar.py b/rerun_py/rerun_sdk/rerun/log/scalar.py similarity index 100% rename from rerun_py/rerun/log/scalar.py rename to rerun_py/rerun_sdk/rerun/log/scalar.py diff --git a/rerun_py/rerun/log/tensor.py b/rerun_py/rerun_sdk/rerun/log/tensor.py similarity index 100% rename from rerun_py/rerun/log/tensor.py rename to rerun_py/rerun_sdk/rerun/log/tensor.py diff --git a/rerun_py/rerun/log/text.py b/rerun_py/rerun_sdk/rerun/log/text.py similarity index 100% rename from rerun_py/rerun/log/text.py rename to rerun_py/rerun_sdk/rerun/log/text.py diff --git a/rerun_py/rerun/log/transform.py b/rerun_py/rerun_sdk/rerun/log/transform.py similarity index 100% rename from rerun_py/rerun/log/transform.py rename to rerun_py/rerun_sdk/rerun/log/transform.py diff --git a/rerun_py/rerun/py.typed b/rerun_py/rerun_sdk/rerun/py.typed similarity index 100% rename from rerun_py/rerun/py.typed rename to rerun_py/rerun_sdk/rerun/py.typed diff --git a/rerun_py/rerun_sdk/rerun_demo/demo.rrd b/rerun_py/rerun_sdk/rerun_demo/demo.rrd new file mode 100644 index 000000000000..4b74c766021a Binary files /dev/null and b/rerun_py/rerun_sdk/rerun_demo/demo.rrd differ
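Note on the `rerun_sdk.pth` + `python-packages` trick configured in `pyproject.toml` above: a `.pth` file in site-packages is read by the stdlib `site` module at interpreter startup, and every plain (non-comment, non-`import`) line in it is added to `sys.path`, relative to the directory containing the file. The sketch below only illustrates that mechanism for this layout; the printed paths are assumptions based on the diff, not read from an installed wheel.

```python
# Illustrative sketch of what the shipped rerun_sdk.pth achieves (assumed layout):
#   <site-packages>/rerun_sdk.pth      -- contains the single line "rerun_sdk"
#   <site-packages>/rerun_sdk/rerun/   -- the actual Python package
# At startup, the `site` module reads the .pth file and appends
# <site-packages>/rerun_sdk to sys.path, so `import rerun` resolves without
# shipping a top-level `rerun` package that would collide with the unrelated
# "rerun" project on PyPI.
import site
import sys

for site_dir in site.getsitepackages():
    print(f"{site_dir}/rerun_sdk.pth -> would put {site_dir}/rerun_sdk on sys.path")

print("sys.path entries mentioning rerun_sdk:", [p for p in sys.path if "rerun_sdk" in p])
```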
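The dev-build shim in `rerun_py/rerun/__init__.py` mutates `sys.path` and `sys.modules` at import time so that `import rerun` lands on the real package. A quick sanity check for a dev checkout (not part of the SDK, purely illustrative) is to confirm which copy actually got imported:

```python
# Hypothetical check for a maturin dev build: after the shim re-imports the real
# package, rerun.__file__ should point into rerun_py/rerun_sdk/rerun/, not into
# the shim directory rerun_py/rerun/.
import rerun

print("rerun resolved to:", rerun.__file__)
assert "rerun_sdk" in str(rerun.__file__), "import still points at the dev shim"
```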
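Regarding the `sleep(0.2)` and the `TODO(emilk)` in `spawn_viewer()`: one possible way to wait for the spawned viewer, instead of sleeping a fixed amount, is to poll the TCP port until something accepts a connection. This is only a sketch of that idea, not what the SDK currently does; `wait_for_viewer` is a hypothetical helper.

```python
import socket
import time


def wait_for_viewer(port: int = 9876, timeout: float = 5.0) -> bool:
    """Return True once something is listening on 127.0.0.1:<port>, or False on timeout."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            # create_connection raises OSError until the viewer's SDK server is up.
            with socket.create_connection(("127.0.0.1", port), timeout=0.1):
                return True
        except OSError:
            time.sleep(0.05)  # viewer not up yet; retry shortly
    return False
```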
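The `set_time_sequence` / `set_time_seconds` / `set_time_nanos` docstrings describe thread-local timelines that stamp all subsequent log calls. A minimal usage sketch, assuming a 30 fps capture, a made-up entity path, and that `log_scalar` (imported in `__init__.py` above) takes an entity path and a float value:

```python
import rerun

rerun.init("timeline_example", spawn_and_connect=True)

for frame_nr in range(100):
    # Stamp all subsequent logging on this thread with the current frame number
    # and a capture time in seconds.
    rerun.set_time_sequence("frame_nr", frame_nr)
    rerun.set_time_seconds("capture_time", frame_nr / 30.0)
    rerun.log_scalar("sensor/value", float(frame_nr) * 0.1)

# Detach this thread from the integer timeline again.
rerun.set_time_sequence("frame_nr", None)
```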
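The `get_recording_id` / `set_recording_id` docstrings explain that processes not spawned via `multiprocessing` must share a recording id manually to end up in the same recording. A hedged sketch of one way to do that; the environment-variable name and the exact call ordering relative to `init`/`connect` are assumptions, not specified by this diff.

```python
import os
import uuid

import rerun

# Pick one id in the parent process and hand it to children (here via an env var).
recording_id = os.environ.get("MY_APP_RECORDING_ID") or str(uuid.uuid4())
os.environ["MY_APP_RECORDING_ID"] = recording_id

rerun.init("my_app")
rerun.set_recording_id(recording_id)  # every process using this id joins the same recording
rerun.connect("127.0.0.1:9876")
```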