diff --git a/.cache/calibration/aloha_default/left_follower.json b/.cache/calibration/aloha_default/left_follower.json
deleted file mode 100644
index 336c238a00..0000000000
--- a/.cache/calibration/aloha_default/left_follower.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
- "homing_offset": [
- 2048,
- 3072,
- 3072,
- -1024,
- -1024,
- 2048,
- -2048,
- 2048,
- -2048
- ],
- "drive_mode": [
- 1,
- 1,
- 1,
- 0,
- 0,
- 1,
- 0,
- 1,
- 0
- ],
- "start_pos": [
- 2015,
- 3058,
- 3061,
- 1071,
- 1071,
- 2035,
- 2152,
- 2029,
- 2499
- ],
- "end_pos": [
- -1008,
- -1963,
- -1966,
- 2141,
- 2143,
- -971,
- 3043,
- -1077,
- 3144
- ],
- "calib_mode": [
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "LINEAR"
- ],
- "motor_names": [
- "waist",
- "shoulder",
- "shoulder_shadow",
- "elbow",
- "elbow_shadow",
- "forearm_roll",
- "wrist_angle",
- "wrist_rotate",
- "gripper"
- ]
-}
diff --git a/.cache/calibration/aloha_default/left_leader.json b/.cache/calibration/aloha_default/left_leader.json
deleted file mode 100644
index d933f2bab6..0000000000
--- a/.cache/calibration/aloha_default/left_leader.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
- "homing_offset": [
- 2048,
- 3072,
- 3072,
- -1024,
- -1024,
- 2048,
- -2048,
- 2048,
- -1024
- ],
- "drive_mode": [
- 1,
- 1,
- 1,
- 0,
- 0,
- 1,
- 0,
- 1,
- 0
- ],
- "start_pos": [
- 2035,
- 3024,
- 3019,
- 979,
- 981,
- 1982,
- 2166,
- 2124,
- 1968
- ],
- "end_pos": [
- -990,
- -2017,
- -2015,
- 2078,
- 2076,
- -1030,
- 3117,
- -1016,
- 2556
- ],
- "calib_mode": [
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "LINEAR"
- ],
- "motor_names": [
- "waist",
- "shoulder",
- "shoulder_shadow",
- "elbow",
- "elbow_shadow",
- "forearm_roll",
- "wrist_angle",
- "wrist_rotate",
- "gripper"
- ]
-}
diff --git a/.cache/calibration/aloha_default/right_follower.json b/.cache/calibration/aloha_default/right_follower.json
deleted file mode 100644
index bc69dfafd0..0000000000
--- a/.cache/calibration/aloha_default/right_follower.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
- "homing_offset": [
- 2048,
- 3072,
- 3072,
- -1024,
- -1024,
- 2048,
- -2048,
- 2048,
- -2048
- ],
- "drive_mode": [
- 1,
- 1,
- 1,
- 0,
- 0,
- 1,
- 0,
- 1,
- 0
- ],
- "start_pos": [
- 2056,
- 2895,
- 2896,
- 1191,
- 1190,
- 2018,
- 2051,
- 2056,
- 2509
- ],
- "end_pos": [
- -1040,
- -2004,
- -2006,
- 2126,
- 2127,
- -1010,
- 3050,
- -1117,
- 3143
- ],
- "calib_mode": [
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "LINEAR"
- ],
- "motor_names": [
- "waist",
- "shoulder",
- "shoulder_shadow",
- "elbow",
- "elbow_shadow",
- "forearm_roll",
- "wrist_angle",
- "wrist_rotate",
- "gripper"
- ]
-}
diff --git a/.cache/calibration/aloha_default/right_leader.json b/.cache/calibration/aloha_default/right_leader.json
deleted file mode 100644
index d96d1de9b2..0000000000
--- a/.cache/calibration/aloha_default/right_leader.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
- "homing_offset": [
- 2048,
- 3072,
- 3072,
- -1024,
- -1024,
- 2048,
- -2048,
- 2048,
- -2048
- ],
- "drive_mode": [
- 1,
- 1,
- 1,
- 0,
- 0,
- 1,
- 0,
- 1,
- 0
- ],
- "start_pos": [
- 2068,
- 3034,
- 3030,
- 1038,
- 1041,
- 1991,
- 1948,
- 2090,
- 1985
- ],
- "end_pos": [
- -1025,
- -2014,
- -2015,
- 2058,
- 2060,
- -955,
- 3091,
- -940,
- 2576
- ],
- "calib_mode": [
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "DEGREE",
- "LINEAR"
- ],
- "motor_names": [
- "waist",
- "shoulder",
- "shoulder_shadow",
- "elbow",
- "elbow_shadow",
- "forearm_roll",
- "wrist_angle",
- "wrist_rotate",
- "gripper"
- ]
-}
diff --git a/.dockerignore b/.dockerignore
index b8c1be1536..c0d8a84b56 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Misc
.git
tmp
@@ -59,7 +73,7 @@ pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
-!tests/data
+!tests/artifacts
htmlcov/
.tox/
.nox/
diff --git a/.gitattributes b/.gitattributes
index 7da36424c1..7d89f37b2b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,6 +1,21 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
*.memmap filter=lfs diff=lfs merge=lfs -text
*.stl filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.json !text !filter !merge !diff
+tests/artifacts/cameras/*.png filter=lfs diff=lfs merge=lfs -text
+*.bag filter=lfs diff=lfs merge=lfs -text
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
index 7cbed67368..2fb23051c1 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
name: "\U0001F41B Bug Report"
description: Submit a bug report to help us improve LeRobot
body:
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 2e6fd44908..22f1ee3d7f 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,33 +1,40 @@
## What this does
+
Explain what this PR does. Feel free to tag your PR with the appropriate label(s).
Examples:
-| Title | Label |
+| Title | Label |
|----------------------|-----------------|
-| Fixes #[issue] | (🐛 Bug) |
-| Adds new dataset | (🗃️ Dataset) |
-| Optimizes something | (⚡️ Performance) |
+| Fixes #[issue] | (🐛 Bug) |
+| Adds new dataset | (🗃️ Dataset) |
+| Optimizes something | (⚡️ Performance) |
## How it was tested
+
Explain/show how you tested your changes.
Examples:
+
- Added `test_something` in `tests/test_stuff.py`.
- Added `new_feature` and checked that training converges with policy X on dataset/environment Y.
- Optimized `some_function`, it now runs X times faster than previously.
## How to checkout & try? (for the reviewer)
+
Provide a simple way for the reviewer to try out your changes.
Examples:
+
```bash
pytest -sx tests/test_stuff.py::test_something
```
+
```bash
-python lerobot/scripts/train.py --some.option=true
+python -m lerobot.scripts.train --some.option=true
```
## SECTION TO REMOVE BEFORE SUBMITTING YOUR PR
+
**Note**: Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
members/contributors who may be interested in your PR. Try to avoid tagging more than 3 people.
diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml
index f20de9784d..20974b85a6 100644
--- a/.github/workflows/build-docker-images.yml
+++ b/.github/workflows/build-docker-images.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/build_docker_images.yml
name: Builds
@@ -8,6 +22,8 @@ on:
schedule:
- cron: "0 1 * * *"
+permissions: {}
+
env:
PYTHON_VERSION: "3.10"
@@ -24,21 +40,24 @@ jobs:
git lfs install
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+ with:
+ cache-binary: false
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
lfs: true
+ persist-credentials: false
- name: Login to DockerHub
- uses: docker/login-action@v3
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push CPU
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
with:
context: .
file: ./docker/lerobot-cpu/Dockerfile
@@ -59,21 +78,24 @@ jobs:
git lfs install
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+ with:
+ cache-binary: false
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
lfs: true
+ persist-credentials: false
- name: Login to DockerHub
- uses: docker/login-action@v3
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push GPU
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
with:
context: .
file: ./docker/lerobot-gpu/Dockerfile
@@ -88,19 +110,23 @@ jobs:
group: aws-general-8-plus
steps:
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+ with:
+ cache-binary: false
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ persist-credentials: false
- name: Login to DockerHub
- uses: docker/login-action@v3
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push GPU dev
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
with:
context: .
file: ./docker/lerobot-gpu-dev/Dockerfile
diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml
new file mode 100644
index 0000000000..884e2e4b55
--- /dev/null
+++ b/.github/workflows/build_documentation.yml
@@ -0,0 +1,23 @@
+name: Build documentation
+
+on:
+ workflow_dispatch:
+ push:
+ paths:
+ - "docs/**"
+ branches:
+ - main
+ - doc-builder*
+ - v*-release
+
+
+jobs:
+ build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
+ uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
+ with:
+ commit_sha: ${{ github.sha }}
+ package: lerobot
+ additional_args: --not_python_module
+ secrets:
+ token: ${{ secrets.HUGGINGFACE_PUSH }}
+ hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml
new file mode 100644
index 0000000000..51bab10d5c
--- /dev/null
+++ b/.github/workflows/build_pr_documentation.yml
@@ -0,0 +1,19 @@
+name: Build PR Documentation
+
+on:
+ pull_request:
+ paths:
+ - "docs/**"
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
+
+jobs:
+ build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
+ uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
+ with:
+ commit_sha: ${{ github.event.pull_request.head.sha }}
+ pr_number: ${{ github.event.number }}
+ package: lerobot
+ additional_args: --not_python_module
diff --git a/.github/workflows/nightly-tests.yml b/.github/workflows/nightly-tests.yml
index bbee19a17e..728016915e 100644
--- a/.github/workflows/nightly-tests.yml
+++ b/.github/workflows/nightly-tests.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/nightly.yml
name: Nightly
@@ -7,6 +21,8 @@ on:
schedule:
- cron: "0 2 * * *"
+permissions: {}
+
# env:
# SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
jobs:
@@ -17,7 +33,7 @@ jobs:
runs-on:
group: aws-general-8-plus
container:
- image: huggingface/lerobot-cpu:latest
+ image: huggingface/lerobot-cpu:latest # zizmor: ignore[unpinned-images]
options: --shm-size "16gb"
credentials:
username: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -28,7 +44,7 @@ jobs:
working-directory: /lerobot
steps:
- name: Tests
- run: pytest -v --cov=./lerobot --disable-warnings tests
+ run: pytest -v --cov=./src/lerobot --disable-warnings tests
- name: Tests end-to-end
run: make test-end-to-end
@@ -44,7 +60,7 @@ jobs:
CUDA_VISIBLE_DEVICES: "0"
TEST_TYPE: "single_gpu"
container:
- image: huggingface/lerobot-gpu:latest
+ image: huggingface/lerobot-gpu:latest # zizmor: ignore[unpinned-images]
options: --gpus all --shm-size "16gb"
credentials:
username: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -58,7 +74,7 @@ jobs:
run: nvidia-smi
- name: Test
- run: pytest -v --cov=./lerobot --cov-report=xml --disable-warnings tests
+ run: pytest -v --cov=./src/lerobot --cov-report=xml --disable-warnings tests
# TODO(aliberts): Link with HF Codecov account
# - name: Upload coverage reports to Codecov with GitHub Action
# uses: codecov/codecov-action@v4
diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml
index c245345f47..1c048c4fe9 100644
--- a/.github/workflows/quality.yml
+++ b/.github/workflows/quality.yml
@@ -1,15 +1,29 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
name: Quality
on:
workflow_dispatch:
workflow_call:
pull_request:
- branches:
- - main
push:
branches:
- main
+permissions: {}
+
env:
PYTHON_VERSION: "3.10"
@@ -19,10 +33,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
- uses: actions/checkout@v3
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ persist-credentials: false
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4.9.1
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -30,55 +46,27 @@ jobs:
id: get-ruff-version
run: |
RUFF_VERSION=$(awk '/repo: https:\/\/github.com\/astral-sh\/ruff-pre-commit/{flag=1;next}/rev:/{if(flag){print $2;exit}}' .pre-commit-config.yaml)
- echo "RUFF_VERSION=${RUFF_VERSION}" >> $GITHUB_ENV
+ echo "ruff_version=${RUFF_VERSION}" >> $GITHUB_OUTPUT
- name: Install Ruff
- run: python -m pip install "ruff==${{ env.RUFF_VERSION }}"
+ env:
+ RUFF_VERSION: ${{ steps.get-ruff-version.outputs.ruff_version }}
+ run: python -m pip install "ruff==${RUFF_VERSION}"
- name: Ruff check
- run: ruff check
+ run: ruff check --output-format=github
- name: Ruff format
run: ruff format --diff
-
- poetry_check:
- name: Poetry check
+ typos:
+ name: Typos
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
- uses: actions/checkout@v3
-
- - name: Install poetry
- run: pipx install "poetry<2.0.0"
-
- - name: Poetry check
- run: poetry check
-
-
- poetry_relax:
- name: Poetry relax
- runs-on: ubuntu-latest
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v3
-
- - name: Install poetry
- run: pipx install "poetry<2.0.0"
-
- - name: Install poetry-relax
- run: poetry self add poetry-relax
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ persist-credentials: false
- - name: Poetry relax
- id: poetry_relax
- run: |
- output=$(poetry relax --check 2>&1)
- if echo "$output" | grep -q "Proposing updates"; then
- echo "$output"
- echo ""
- echo "Some dependencies have caret '^' version requirement added by poetry by default."
- echo "Please replace them with '>='. You can do this by hand or use poetry-relax to do this."
- exit 1
- else
- echo "$output"
- fi
+ - name: typos-action
+ uses: crate-ci/typos@db35ee91e80fbb447f33b0e5fbddb24d2a1a884f # v1.29.10
diff --git a/.github/workflows/test-docker-build.yml b/.github/workflows/test-docker-build.yml
index 979897b0e1..c338134188 100644
--- a/.github/workflows/test-docker-build.yml
+++ b/.github/workflows/test-docker-build.yml
@@ -1,14 +1,30 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/test-docker-build.yml
name: Test Dockerfiles
on:
pull_request:
- branches:
- - main
paths:
# Run only when DockerFile files are modified
- - "docker/**"
+ - "docker/lerobot-cpu/**"
+ - "docker/lerobot-gpu/**"
+ - "docker/lerobot-gpu-dev/**"
+
+permissions: {}
env:
PYTHON_VERSION: "3.10"
@@ -21,43 +37,46 @@ jobs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ persist-credentials: false
- name: Get changed files
id: changed-files
- uses: tj-actions/changed-files@v44
+ uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
with:
files: docker/**
json: "true"
- - name: Run step if only the files listed above change
+ - name: Run step if only the files listed above change # zizmor: ignore[template-injection]
if: steps.changed-files.outputs.any_changed == 'true'
id: set-matrix
- env:
- ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
run: |
echo "matrix=${{ steps.changed-files.outputs.all_changed_files}}" >> $GITHUB_OUTPUT
-
build_modified_dockerfiles:
name: Build modified Docker images
needs: get_changed_files
runs-on:
group: aws-general-8-plus
- if: ${{ needs.get_changed_files.outputs.matrix }} != ''
+ if: needs.get_changed_files.outputs.matrix != ''
strategy:
fail-fast: false
matrix:
docker-file: ${{ fromJson(needs.get_changed_files.outputs.matrix) }}
steps:
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+ with:
+ cache-binary: false
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ persist-credentials: false
- name: Build Docker image
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
with:
file: ${{ matrix.docker-file }}
context: .
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 53b37466a2..d6ea1d4040 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,29 +1,48 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
name: Tests
on:
pull_request:
- branches:
- - main
paths:
- - "lerobot/**"
+ - "src/**"
- "tests/**"
- "examples/**"
- ".github/**"
- - "poetry.lock"
+ - "pyproject.toml"
+ - ".pre-commit-config.yaml"
- "Makefile"
- ".cache/**"
push:
branches:
- main
paths:
- - "lerobot/**"
+ - "src/**"
- "tests/**"
- "examples/**"
- ".github/**"
- - "poetry.lock"
+ - "pyproject.toml"
+ - ".pre-commit-config.yaml"
- "Makefile"
- ".cache/**"
+permissions: {}
+
+env:
+ UV_VERSION: "0.6.0"
+
jobs:
pytest:
name: Pytest
@@ -31,9 +50,10 @@ jobs:
env:
MUJOCO_GL: egl
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
lfs: true # Ensure LFS files are pulled
+ persist-credentials: false
- name: Install apt dependencies
# portaudio19-dev is needed to install pyaudio
@@ -41,25 +61,19 @@ jobs:
sudo apt-get update && \
sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev
- - name: Install poetry
- run: |
- pipx install poetry && poetry config virtualenvs.in-project true
- echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH
-
- # TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10
- - name: Set up Python 3.10
- uses: actions/setup-python@v5
+ - name: Install uv and python
+ uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
with:
+ enable-cache: true
+ version: ${{ env.UV_VERSION }}
python-version: "3.10"
- cache: "poetry"
- - name: Install poetry dependencies
- run: |
- poetry install --all-extras
+ - name: Install lerobot (all extras)
+ run: uv sync --all-extras
- name: Test with pytest
run: |
- pytest tests -v --cov=./lerobot --durations=0 \
+ uv run pytest tests -v --cov=./src/lerobot --durations=0 \
-W ignore::DeprecationWarning:imageio_ffmpeg._utils:7 \
-W ignore::UserWarning:torch.utils.data.dataloader:558 \
-W ignore::UserWarning:gymnasium.utils.env_checker:247 \
@@ -71,69 +85,66 @@ jobs:
env:
MUJOCO_GL: egl
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
lfs: true # Ensure LFS files are pulled
+ persist-credentials: false
- name: Install apt dependencies
run: sudo apt-get update && sudo apt-get install -y ffmpeg
- - name: Install poetry
- run: |
- pipx install poetry && poetry config virtualenvs.in-project true
- echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH
-
- # TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10
- - name: Set up Python 3.10
- uses: actions/setup-python@v5
+ - name: Install uv and python
+ uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
with:
+ enable-cache: true
+ version: ${{ env.UV_VERSION }}
python-version: "3.10"
- - name: Install poetry dependencies
- run: |
- poetry install --extras "test"
+ - name: Install lerobot
+ run: uv sync --extra "test"
- name: Test with pytest
run: |
- pytest tests -v --cov=./lerobot --durations=0 \
+ uv run pytest tests -v --cov=./src/lerobot --durations=0 \
-W ignore::DeprecationWarning:imageio_ffmpeg._utils:7 \
-W ignore::UserWarning:torch.utils.data.dataloader:558 \
-W ignore::UserWarning:gymnasium.utils.env_checker:247 \
&& rm -rf tests/outputs outputs
- # TODO(aliberts, rcadene): redesign after v2 migration / removing hydra
- # end-to-end:
- # name: End-to-end
- # runs-on: ubuntu-latest
- # env:
- # MUJOCO_GL: egl
- # steps:
- # - uses: actions/checkout@v4
- # with:
- # lfs: true # Ensure LFS files are pulled
-
- # - name: Install apt dependencies
- # # portaudio19-dev is needed to install pyaudio
- # run: |
- # sudo apt-get update && \
- # sudo apt-get install -y libegl1-mesa-dev portaudio19-dev
-
- # - name: Install poetry
- # run: |
- # pipx install poetry && poetry config virtualenvs.in-project true
- # echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH
-
- # - name: Set up Python 3.10
- # uses: actions/setup-python@v5
- # with:
- # python-version: "3.10"
- # cache: "poetry"
-
- # - name: Install poetry dependencies
- # run: |
- # poetry install --all-extras
-
- # - name: Test end-to-end
- # run: |
- # make test-end-to-end \
- # && rm -rf outputs
+ end-to-end:
+ name: End-to-end
+ runs-on: ubuntu-latest
+ env:
+ MUJOCO_GL: egl
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ lfs: true # Ensure LFS files are pulled
+ persist-credentials: false
+
+ - name: Install apt dependencies
+ # portaudio19-dev is needed to install pyaudio
+ run: |
+ sudo apt-get update && \
+ sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev
+
+ - name: Install uv and python
+ uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
+ with:
+ enable-cache: true
+ version: ${{ env.UV_VERSION }}
+ python-version: "3.10"
+
+ - name: Install lerobot (all extras)
+ run: |
+ uv venv
+ uv sync --all-extras
+
+ - name: venv
+ run: |
+ echo "PYTHON_PATH=${{ github.workspace }}/.venv/bin/python" >> $GITHUB_ENV
+
+ - name: Test end-to-end
+ run: |
+ make test-end-to-end \
+ && rm -rf outputs
diff --git a/.github/workflows/trufflehog.yml b/.github/workflows/trufflehog.yml
index d1dddab7d8..704a3baaa6 100644
--- a/.github/workflows/trufflehog.yml
+++ b/.github/workflows/trufflehog.yml
@@ -1,20 +1,35 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
on:
push:
name: Secret Leaks
-permissions:
- contents: read
+permissions: {}
jobs:
trufflehog:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
+ persist-credentials: false
+
- name: Secret Scanning
- uses: trufflesecurity/trufflehog@main
+ uses: trufflesecurity/trufflehog@90694bf9af66e7536abc5824e7a87246dbf933cb # v3.88.35
with:
extra_args: --only-verified
diff --git a/.github/workflows/upload_pr_documentation.yml b/.github/workflows/upload_pr_documentation.yml
new file mode 100644
index 0000000000..32665930bb
--- /dev/null
+++ b/.github/workflows/upload_pr_documentation.yml
@@ -0,0 +1,16 @@
+name: Upload PR Documentation
+
+on: # zizmor: ignore[dangerous-triggers] We follow the same pattern as in Transformers
+ workflow_run:
+ workflows: [ "Build PR Documentation" ]
+ types:
+ - completed
+
+jobs:
+ build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
+ uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
+ with:
+ package_name: lerobot
+ secrets:
+ hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
+ comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 0e203a3946..c4d1f769f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,155 +1,175 @@
-# Logging
-logs
-tmp
-wandb
-
-# Data
-data
-outputs
-
-# Apple
-.DS_Store
-
-# VS Code
-.vscode
-
-# HPC
-nautilus/*.yaml
-*.key
-
-# Slurm
-sbatch*.sh
-
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+### Environments & Dependencies ###
+.env
+.venv
+env/
+venv/
+env.bak/
+venv.bak/
+.python-version
+__pypackages__/
+node_modules/
-# C extensions
-*.so
+# Lock files
+poetry.lock
+uv.lock
+Pipfile.lock
-# Distribution / packaging
-.Python
+### Build & Distribution ###
build/
-develop-eggs/
dist/
+sdist/
+wheels/
downloads/
eggs/
.eggs/
-lib/
-lib64/
parts/
-sdist/
var/
-wheels/
pip-wheel-metadata/
share/python-wheels/
+develop-eggs/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
+lib/
+lib64/
# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
+### Compiled & Cached Files ###
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+*.sage.py
+.cache/
+.ruff_cache/
+.mypy_cache/
+.pyre/
+.pytype/
+cython_debug/
-# Unit test / coverage reports
-!tests/data
+### Testing & Coverage ###
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
+.pytest_cache/
+.hypothesis/
nosetests.xml
coverage.xml
*.cover
*.py,cover
-.hypothesis/
-.pytest_cache/
+!tests/artifacts
-# Ignore .cache except calibration
-.cache/*
-!.cache/calibration/
-!.cache/calibration/**
+### Logs & Temporary Files ###
+logs/
+tmp/
+*.log
+pip-log.txt
+pip-delete-this-directory.txt
+celerybeat-schedule
+celerybeat.pid
-# Translations
-*.mo
-*.pot
+### IDE & Editor Config ###
+# VS Code
+.vscode/
+.devcontainer/
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
+# JetBrains / PyCharm
+.idea/
-# Flask stuff:
-instance/
-.webassets-cache
+# Spyder
+.spyderproject
+.spyproject
-# Scrapy stuff:
-.scrapy
+# Rope
+.ropeproject
-# Sphinx documentation
-docs/_build/
+# Vim
+*.swp
-# PyBuilder
-.pybuilder/
-target/
+# Other
+*~
-# Jupyter Notebook
-.ipynb_checkpoints
+### OS Specific ###
+# macOS
+.DS_Store
-# IPython
-profile_default/
-ipython_config.py
+# Windows
+Thumbs.db
-# pyenv
-.python-version
+### Framework & Tool Specific ###
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow
-__pypackages__/
+.Python
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
+# Django
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
-# SageMath parsed files
-*.sage.py
+# Flask
+instance/
+.webassets-cache
-# Environments
-.env
-.venv
-env/
-venv/
-env.bak/
-venv.bak/
+# Scrapy
+.scrapy
-# Spyder project settings
-.spyderproject
-.spyproject
+# Jupyter
+.ipynb_checkpoints/
+profile_default/
+ipython_config.py
-# Rope project settings
-.ropeproject
+# Sphinx
+docs/_build/
-# mkdocs documentation
+# MkDocs
/site
+# PyBuilder
+.pybuilder/
+target/
+
# mypy
-.mypy_cache/
.dmypy.json
dmypy.json
-# Pyre type checker
-.pyre/
+### HPC & Slurm ###
+nautilus/*.yaml
+*.key
+sbatch*.sh
-# pytype static type analyzer
-.pytype/
+### Miscellaneous ###
+# W&B
+wandb/
-# Cython debug symbols
-cython_debug/
+# Dev scripts
+.dev/
+
+# Data folders
+data/
+outputs/
+
+# Translations
+*.mo
+*.pot
+
+# Dev folders
+.cache/*
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 58eca32066..e509d6d88e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,11 +1,35 @@
-exclude: ^(tests/data)
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
default_language_version:
python: python3.10
+
+exclude: "tests/artifacts/.*\\.safetensors$"
+
repos:
+ ##### Meta #####
+ - repo: meta
+ hooks:
+ - id: check-useless-excludes
+ - id: check-hooks-apply
+
+ ##### General Code Quality & Formatting #####
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-added-large-files
+ args: ['--maxkb=1024']
- id: debug-statements
- id: check-merge-conflict
- id: check-case-conflict
@@ -13,25 +37,71 @@ repos:
- id: check-toml
- id: end-of-file-fixer
- id: trailing-whitespace
+
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.11.13
+ hooks:
+ - id: ruff-format
+ - id: ruff
+ args: [--fix, --exit-non-zero-on-fix]
+
+ - repo: https://github.com/crate-ci/typos
+ rev: v1.34.0
+ hooks:
+ - id: typos
+ args: [--force-exclude]
+
- repo: https://github.com/asottile/pyupgrade
- rev: v3.19.0
+ rev: v3.20.0
hooks:
- id: pyupgrade
- - repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.8.2
+ args: [--py310-plus]
+
+ ##### Markdown Quality #####
+ - repo: https://github.com/pre-commit/mirrors-prettier
+ rev: v4.0.0-alpha.8
hooks:
- - id: ruff
- args: [--fix]
- - id: ruff-format
- - repo: https://github.com/python-poetry/poetry
- rev: 1.8.0
- hooks:
- - id: poetry-check
- - id: poetry-lock
- args:
- - "--check"
- - "--no-update"
+ - id: prettier
+ name: Format Markdown with Prettier
+ types_or: [markdown, mdx]
+ args: [--prose-wrap=preserve]
+
+ ##### Security #####
- repo: https://github.com/gitleaks/gitleaks
- rev: v8.21.2
+ rev: v8.27.2
hooks:
- id: gitleaks
+
+ - repo: https://github.com/woodruffw/zizmor-pre-commit
+ rev: v1.11.0
+ hooks:
+ - id: zizmor
+
+ - repo: https://github.com/PyCQA/bandit
+ rev: 1.8.6
+ hooks:
+ - id: bandit
+ args: ["-c", "pyproject.toml"]
+ additional_dependencies: ["bandit[toml]"]
+
+ # TODO(Steven): Uncomment when ready to use
+ ##### Static Analysis & Typing #####
+ # - repo: https://github.com/pre-commit/mirrors-mypy
+ # rev: v1.16.0
+ # hooks:
+ # - id: mypy
+ # args: [--python-version=3.10]
+
+ ##### Docstring Checks #####
+ # - repo: https://github.com/akaihola/darglint2
+ # rev: v1.8.2
+ # hooks:
+ # - id: darglint2
+ # args: ["--docstring-style", "google", "-v", "2"]
+ # exclude: ^tests/.*$
+
+ # - repo: https://github.com/econchick/interrogate
+ # rev: 1.7.0
+ # hooks:
+ # - id: interrogate
+ # args: ["-vv", "--config=pyproject.toml"]
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 04a0527536..c0fdac843b 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,4 +1,3 @@
-
# Contributor Covenant Code of Conduct
## Our Pledge
@@ -18,23 +17,23 @@ diverse, inclusive, and healthy community.
Examples of behavior that contributes to a positive environment for our
community include:
-* Demonstrating empathy and kindness toward other people
-* Being respectful of differing opinions, viewpoints, and experiences
-* Giving and gracefully accepting constructive feedback
-* Accepting responsibility and apologizing to those affected by our mistakes,
+- Demonstrating empathy and kindness toward other people
+- Being respectful of differing opinions, viewpoints, and experiences
+- Giving and gracefully accepting constructive feedback
+- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
-* Focusing on what is best not just for us as individuals, but for the overall
+- Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
-* The use of sexualized language or imagery, and sexual attention or advances of
+- The use of sexualized language or imagery, and sexual attention or advances of
any kind
-* Trolling, insulting or derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or email address,
+- Trolling, insulting or derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or email address,
without their explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
+- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b8c198568e..369af602bf 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -15,10 +15,11 @@ Whichever way you choose to contribute, please be mindful to respect our
## You can contribute in so many ways!
Some of the ways you can contribute to 🤗 LeRobot:
-* Fixing outstanding issues with the existing code.
-* Implementing new models, datasets or simulation environments.
-* Contributing to the examples or to the documentation.
-* Submitting issues related to bugs or desired new features.
+
+- Fixing outstanding issues with the existing code.
+- Implementing new models, datasets or simulation environments.
+- Contributing to the examples or to the documentation.
+- Submitting issues related to bugs or desired new features.
Following the guides below, feel free to open issues and PRs and to coordinate your efforts with the community on our [Discord Channel](https://discord.gg/VjFz58wn3R). For specific inquiries, reach out to [Remi Cadene](mailto:remi.cadene@huggingface.co).
@@ -40,24 +41,26 @@ already reported** (use the search bar on Github under Issues).
Did not find it? :( So we can act quickly on it, please follow these steps:
-* Include your **OS type and version**, the versions of **Python** and **PyTorch**.
-* A short, self-contained, code snippet that allows us to reproduce the bug in
+- Include your **OS type and version**, the versions of **Python** and **PyTorch**.
+- A short, self-contained, code snippet that allows us to reproduce the bug in
less than 30s.
-* The full traceback if an exception is raised.
-* Attach any other additional information, like screenshots, you think may help.
+- The full traceback if an exception is raised.
+- Attach any other additional information, like screenshots, you think may help.
### Do you want a new feature?
A good feature request addresses the following points:
1. Motivation first:
-* Is it related to a problem/frustration with the library? If so, please explain
+
+- Is it related to a problem/frustration with the library? If so, please explain
why. Providing a code snippet that demonstrates the problem is best.
-* Is it related to something you would need for a project? We'd love to hear
+- Is it related to something you would need for a project? We'd love to hear
about it!
-* Is it something you worked on and think could benefit the community?
+- Is it something you worked on and think could benefit the community?
Awesome! Tell us what problem it solved for you.
-2. Write a *paragraph* describing the feature.
+
+2. Write a _paragraph_ describing the feature.
3. Provide a **code snippet** that demonstrates its future use.
4. In case this is related to a paper, please attach a link.
5. Attach any additional information (drawings, screenshots, etc.) you think may help.
@@ -67,19 +70,22 @@ post it.
## Adding new policies, datasets or environments
-Look at our implementations for [datasets](./lerobot/common/datasets/), [policies](./lerobot/common/policies/),
+Look at our implementations for [datasets](./src/lerobot/datasets/), [policies](./src/lerobot/policies/),
environments ([aloha](https://github.com/huggingface/gym-aloha),
[xarm](https://github.com/huggingface/gym-xarm),
[pusht](https://github.com/huggingface/gym-pusht))
and follow the same api design.
When implementing a new dataset loadable with LeRobotDataset follow these steps:
+
- Update `available_datasets_per_env` in `lerobot/__init__.py`
When implementing a new environment (e.g. `gym_aloha`), follow these steps:
+
- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps:
+
- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py`
- Set the required `name` class attribute.
- Update variables in `tests/test_available.py` by importing your new Policy class
@@ -129,36 +135,78 @@ Follow these steps to start contributing:
🚨 **Do not** work on the `main` branch.
-4. for development, we use `poetry` instead of just `pip` to easily track our dependencies.
- If you don't have it already, follow the [instructions](https://python-poetry.org/docs/#installation) to install it.
+4. For development, we advise using a tool like `poetry` or `uv` instead of just `pip` to easily track our dependencies.
+ Follow the instructions to [install poetry](https://python-poetry.org/docs/#installation) (use a version >=2.1.0) or to [install uv](https://docs.astral.sh/uv/getting-started/installation/#installation-methods) if you don't have one of them already.
Set up a development environment with conda or miniconda:
+
```bash
conda create -y -n lerobot-dev python=3.10 && conda activate lerobot-dev
```
+ If you're using `uv`, it can manage python versions so you can instead do:
+
+ ```bash
+ uv venv --python 3.10 && source .venv/bin/activate
+ ```
+
To develop on 🤗 LeRobot, you will at least need to install the `dev` and `test` extras dependencies along with the core library:
+
+ using `poetry`
+
+ ```bash
+ poetry sync --extras "dev test"
+ ```
+
+ using `uv`
+
```bash
- poetry install --sync --extras "dev test"
+ uv sync --extra dev --extra test
```
You can also install the project with all its dependencies (including environments):
+
+ using `poetry`
+
```bash
- poetry install --sync --all-extras
+ poetry sync --all-extras
```
- > **Note:** If you don't install simulation environments with `--all-extras`, the tests that require them will be skipped when running the pytest suite locally. However, they *will* be tested in the CI. In general, we advise you to install everything and test locally before pushing.
+ using `uv`
+
+ ```bash
+ uv sync --all-extras
+ ```
- Whichever command you chose to install the project (e.g. `poetry install --sync --all-extras`), you should run it again when pulling code with an updated version of `pyproject.toml` and `poetry.lock` in order to synchronize your virtual environment with the new dependencies.
+ > **Note:** If you don't install simulation environments with `--all-extras`, the tests that require them will be skipped when running the pytest suite locally. However, they _will_ be tested in the CI. In general, we advise you to install everything and test locally before pushing.
+
+ Whichever command you chose to install the project (e.g. `poetry sync --all-extras`), you should run it again when pulling code with an updated version of `pyproject.toml` and `poetry.lock` in order to synchronize your virtual environment with the new dependencies.
The equivalent of `pip install some-package`, would just be:
+
+ using `poetry`
+
```bash
poetry add some-package
```
+ using `uv`
+
+ ```bash
+ uv add some-package
+ ```
+
When making changes to the poetry sections of the `pyproject.toml`, you should run the following command to lock dependencies.
+ using `poetry`
+
```bash
- poetry lock --no-update
+ poetry lock
+ ```
+
+ using `uv`
+
+ ```bash
+ uv lock
```
5. Develop the features on your branch.
@@ -178,11 +226,13 @@ Follow these steps to start contributing:
automatically as Git commit hooks.
Install `pre-commit` hooks:
+
```bash
pre-commit install
```
You can run these hooks whenever you need on staged files with:
+
```bash
pre-commit
```
@@ -195,7 +245,8 @@ Follow these steps to start contributing:
git commit
```
- Note, if you already commited some changes that have a wrong formatting, you can use:
+   Note: if you already committed some changes with incorrect formatting, you can use:
+
```bash
pre-commit run --all-files
```
@@ -216,16 +267,15 @@ Follow these steps to start contributing:
git push -u origin a-descriptive-name-for-my-changes
```
-6. Once you are satisfied (**and the checklist below is happy too**), go to the
+7. Once you are satisfied (**and the checklist below is happy too**), go to the
webpage of your fork on GitHub. Click on 'Pull request' to send your changes
to the project maintainers for review.
-7. It's ok if maintainers ask you for changes. It happens to core contributors
+8. It's ok if maintainers ask you for changes. It happens to core contributors
too! So everyone can see the changes in the Pull request, work in your local
branch and push the changes to your fork. They will automatically appear in
the pull request.
-
### Checklist
1. The title of your pull request should be a summary of its contribution;
@@ -236,9 +286,6 @@ Follow these steps to start contributing:
the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate
it from PRs ready to be merged;
4. Make sure existing tests pass;
-
### Tests
@@ -247,18 +294,21 @@ An extensive test suite is included to test the library behavior and several exa
Install [git lfs](https://git-lfs.com/) to retrieve test artifacts (if you don't have it already).
On Mac:
+
```bash
brew install git-lfs
git lfs install
```
On Ubuntu:
+
```bash
sudo apt-get install git-lfs
git lfs install
```
-Pull artifacts if they're not in [tests/data](tests/data)
+Pull artifacts if they're not in [tests/artifacts](tests/artifacts)
+
```bash
git lfs pull
```
@@ -270,6 +320,5 @@ repository, here's how to run tests with `pytest` for the library:
python -m pytest -sv ./tests
```
-
You can specify a smaller set of tests in order to test only the feature
you're working on.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000..c1fb2ea754
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,2 @@
+include src/lerobot/templates/lerobot_modelcard_template.md
+include src/lerobot/datasets/card_template.md
diff --git a/Makefile b/Makefile
index f6517497e1..ca1495facd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,11 +1,25 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
.PHONY: tests
PYTHON_PATH := $(shell which python)
-# If Poetry is installed, redefine PYTHON_PATH to use the Poetry-managed Python
-POETRY_CHECK := $(shell command -v poetry)
-ifneq ($(POETRY_CHECK),)
- PYTHON_PATH := $(shell poetry run which python)
+# If uv is installed and a virtual environment exists, use it
+UV_CHECK := $(shell command -v uv)
+ifneq ($(UV_CHECK),)
+	PYTHON_PATH := $(shell .venv/bin/python -c "import sys; print(sys.executable)")
endif
export PATH := $(dir $(PYTHON_PATH)):$(PATH)
@@ -20,171 +34,147 @@ build-gpu:
test-end-to-end:
${MAKE} DEVICE=$(DEVICE) test-act-ete-train
+ ${MAKE} DEVICE=$(DEVICE) test-act-ete-train-resume
${MAKE} DEVICE=$(DEVICE) test-act-ete-eval
- ${MAKE} DEVICE=$(DEVICE) test-act-ete-train-amp
- ${MAKE} DEVICE=$(DEVICE) test-act-ete-eval-amp
${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-train
${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train
- ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train-with-online
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval
- ${MAKE} DEVICE=$(DEVICE) test-default-ete-eval
- ${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial
+ ${MAKE} DEVICE=$(DEVICE) test-smolvla-ete-train
+ ${MAKE} DEVICE=$(DEVICE) test-smolvla-ete-eval
test-act-ete-train:
- python lerobot/scripts/train.py \
- policy=act \
- policy.dim_model=64 \
- env=aloha \
- wandb.enable=False \
- training.offline_steps=2 \
- training.online_steps=0 \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- device=$(DEVICE) \
- training.save_checkpoint=true \
- training.save_freq=2 \
- policy.n_action_steps=20 \
- policy.chunk_size=20 \
- training.batch_size=2 \
- training.image_transforms.enable=true \
- hydra.run.dir=tests/outputs/act/
+ python -m lerobot.scripts.train \
+ --policy.type=act \
+ --policy.dim_model=64 \
+ --policy.n_action_steps=20 \
+ --policy.chunk_size=20 \
+ --policy.device=$(DEVICE) \
+ --policy.push_to_hub=false \
+ --env.type=aloha \
+ --env.episode_length=5 \
+ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
+ --dataset.image_transforms.enable=true \
+ --dataset.episodes="[0]" \
+ --batch_size=2 \
+ --steps=4 \
+ --eval_freq=2 \
+ --eval.n_episodes=1 \
+ --eval.batch_size=1 \
+ --save_freq=2 \
+ --save_checkpoint=true \
+ --log_freq=1 \
+ --wandb.enable=false \
+ --output_dir=tests/outputs/act/
+
+test-act-ete-train-resume:
+ python -m lerobot.scripts.train \
+ --config_path=tests/outputs/act/checkpoints/000002/pretrained_model/train_config.json \
+ --resume=true
test-act-ete-eval:
- python lerobot/scripts/eval.py \
- -p tests/outputs/act/checkpoints/000002/pretrained_model \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- env.episode_length=8 \
- device=$(DEVICE) \
-
-test-act-ete-train-amp:
- python lerobot/scripts/train.py \
- policy=act \
- policy.dim_model=64 \
- env=aloha \
- wandb.enable=False \
- training.offline_steps=2 \
- training.online_steps=0 \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- device=$(DEVICE) \
- training.save_checkpoint=true \
- training.save_freq=2 \
- policy.n_action_steps=20 \
- policy.chunk_size=20 \
- training.batch_size=2 \
- hydra.run.dir=tests/outputs/act_amp/ \
- training.image_transforms.enable=true \
- use_amp=true
-
-test-act-ete-eval-amp:
- python lerobot/scripts/eval.py \
- -p tests/outputs/act_amp/checkpoints/000002/pretrained_model \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- env.episode_length=8 \
- device=$(DEVICE) \
- use_amp=true
+ python -m lerobot.scripts.eval \
+ --policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \
+ --policy.device=$(DEVICE) \
+ --env.type=aloha \
+ --env.episode_length=5 \
+ --eval.n_episodes=1 \
+ --eval.batch_size=1
test-diffusion-ete-train:
- python lerobot/scripts/train.py \
- policy=diffusion \
- policy.down_dims=\[64,128,256\] \
- policy.diffusion_step_embed_dim=32 \
- policy.num_inference_steps=10 \
- env=pusht \
- wandb.enable=False \
- training.offline_steps=2 \
- training.online_steps=0 \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- device=$(DEVICE) \
- training.save_checkpoint=true \
- training.save_freq=2 \
- training.batch_size=2 \
- training.image_transforms.enable=true \
- hydra.run.dir=tests/outputs/diffusion/
+ python -m lerobot.scripts.train \
+ --policy.type=diffusion \
+ --policy.down_dims='[64,128,256]' \
+ --policy.diffusion_step_embed_dim=32 \
+ --policy.num_inference_steps=10 \
+ --policy.device=$(DEVICE) \
+ --policy.push_to_hub=false \
+ --env.type=pusht \
+ --env.episode_length=5 \
+ --dataset.repo_id=lerobot/pusht \
+ --dataset.image_transforms.enable=true \
+ --dataset.episodes="[0]" \
+ --batch_size=2 \
+ --steps=2 \
+ --eval_freq=2 \
+ --eval.n_episodes=1 \
+ --eval.batch_size=1 \
+ --save_checkpoint=true \
+ --save_freq=2 \
+ --log_freq=1 \
+ --wandb.enable=false \
+ --output_dir=tests/outputs/diffusion/
test-diffusion-ete-eval:
- python lerobot/scripts/eval.py \
- -p tests/outputs/diffusion/checkpoints/000002/pretrained_model \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- env.episode_length=8 \
- device=$(DEVICE) \
+ python -m lerobot.scripts.eval \
+ --policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \
+ --policy.device=$(DEVICE) \
+ --env.type=pusht \
+ --env.episode_length=5 \
+ --eval.n_episodes=1 \
+ --eval.batch_size=1
test-tdmpc-ete-train:
- python lerobot/scripts/train.py \
- policy=tdmpc \
- env=xarm \
- env.task=XarmLift-v0 \
- dataset_repo_id=lerobot/xarm_lift_medium \
- wandb.enable=False \
- training.offline_steps=2 \
- training.online_steps=0 \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- env.episode_length=2 \
- device=$(DEVICE) \
- training.save_checkpoint=true \
- training.save_freq=2 \
- training.batch_size=2 \
- training.image_transforms.enable=true \
- hydra.run.dir=tests/outputs/tdmpc/
-
-test-tdmpc-ete-train-with-online:
- python lerobot/scripts/train.py \
- env=pusht \
- env.gym.obs_type=environment_state_agent_pos \
- policy=tdmpc_pusht_keypoints \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- env.episode_length=10 \
- device=$(DEVICE) \
- training.offline_steps=2 \
- training.online_steps=20 \
- training.save_checkpoint=false \
- training.save_freq=10 \
- training.batch_size=2 \
- training.online_rollout_n_episodes=2 \
- training.online_rollout_batch_size=2 \
- training.online_steps_between_rollouts=10 \
- training.online_buffer_capacity=15 \
- eval.use_async_envs=true \
- hydra.run.dir=tests/outputs/tdmpc_online/
-
+ python -m lerobot.scripts.train \
+ --policy.type=tdmpc \
+ --policy.device=$(DEVICE) \
+ --policy.push_to_hub=false \
+ --env.type=xarm \
+ --env.task=XarmLift-v0 \
+ --env.episode_length=5 \
+ --dataset.repo_id=lerobot/xarm_lift_medium \
+ --dataset.image_transforms.enable=true \
+ --dataset.episodes="[0]" \
+ --batch_size=2 \
+ --steps=2 \
+ --eval_freq=2 \
+ --eval.n_episodes=1 \
+ --eval.batch_size=1 \
+ --save_checkpoint=true \
+ --save_freq=2 \
+ --log_freq=1 \
+ --wandb.enable=false \
+ --output_dir=tests/outputs/tdmpc/
test-tdmpc-ete-eval:
- python lerobot/scripts/eval.py \
- -p tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- env.episode_length=8 \
- device=$(DEVICE) \
-
-test-default-ete-eval:
- python lerobot/scripts/eval.py \
- --config lerobot/configs/default.yaml \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- env.episode_length=8 \
- device=$(DEVICE) \
-
-test-act-pusht-tutorial:
- cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/created_by_Makefile.yaml
- python lerobot/scripts/train.py \
- policy=created_by_Makefile.yaml \
- env=pusht \
- wandb.enable=False \
- training.offline_steps=2 \
- eval.n_episodes=1 \
- eval.batch_size=1 \
- env.episode_length=2 \
- device=$(DEVICE) \
- training.save_model=true \
- training.save_freq=2 \
- training.batch_size=2 \
- training.image_transforms.enable=true \
- hydra.run.dir=tests/outputs/act_pusht/
- rm lerobot/configs/policy/created_by_Makefile.yaml
+ python -m lerobot.scripts.eval \
+ --policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
+ --policy.device=$(DEVICE) \
+ --env.type=xarm \
+ --env.episode_length=5 \
+ --env.task=XarmLift-v0 \
+ --eval.n_episodes=1 \
+ --eval.batch_size=1
+
+
+test-smolvla-ete-train:
+ python -m lerobot.scripts.train \
+ --policy.type=smolvla \
+ --policy.n_action_steps=20 \
+ --policy.chunk_size=20 \
+ --policy.device=$(DEVICE) \
+ --policy.push_to_hub=false \
+ --env.type=aloha \
+ --env.episode_length=5 \
+ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
+ --dataset.image_transforms.enable=true \
+ --dataset.episodes="[0]" \
+ --batch_size=2 \
+ --steps=4 \
+ --eval_freq=2 \
+ --eval.n_episodes=1 \
+ --eval.batch_size=1 \
+ --save_freq=2 \
+ --save_checkpoint=true \
+ --log_freq=1 \
+ --wandb.enable=false \
+ --output_dir=tests/outputs/smolvla/
+
+test-smolvla-ete-eval:
+ python -m lerobot.scripts.eval \
+ --policy.path=tests/outputs/smolvla/checkpoints/000004/pretrained_model \
+ --policy.device=$(DEVICE) \
+ --env.type=aloha \
+ --env.episode_length=5 \
+ --eval.n_episodes=1 \
+ --eval.batch_size=1
diff --git a/README.md b/README.md
index 849a14de55..1d7cbcad48 100644
--- a/README.md
+++ b/README.md
@@ -23,15 +23,60 @@
Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!
+
Check out the LeKiwi tutorial and bring your robot to life on wheels.
+
+
@@ -42,7 +87,6 @@
---
-
🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.
🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.
@@ -68,113 +112,107 @@
### Acknowledgment
+- The LeRobot team 🤗 for building SmolVLA [Paper](https://arxiv.org/abs/2506.01844), [Blog](https://huggingface.co/blog/smolvla).
- Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
- Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io).
- Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM).
- Thanks to Antonio Loquercio and Ashish Kumar for their early support.
- Thanks to [Seungjae (Jay) Lee](https://sjlee.cc/), [Mahi Shafiullah](https://mahis.life/) and colleagues for open sourcing [VQ-BeT](https://sjlee.cc/vq-bet/) policy and helping us adapt the codebase to our repository. The policy is adapted from [VQ-BeT repo](https://github.com/jayLEE0301/vq_bet_official).
-
## Installation
Download our source code:
+
```bash
git clone https://github.com/huggingface/lerobot.git
cd lerobot
```
Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniconda`](https://docs.anaconda.com/free/miniconda/index.html):
+
```bash
conda create -y -n lerobot python=3.10
conda activate lerobot
```
+When using `miniconda`, install `ffmpeg` in your environment:
+
+```bash
+conda install ffmpeg -c conda-forge
+```
+
+> **NOTE:** This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
+>
+> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
+>
+> ```bash
+> conda install ffmpeg=7.1.1 -c conda-forge
+> ```
+>
+> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure the resulting ffmpeg binary is the one picked up by your install by checking `which ffmpeg`.
+
Install 🤗 LeRobot:
+
```bash
pip install -e .
```
-> **NOTE:** Depending on your platform, If you encounter any build errors during this step
-you may need to install `cmake` and `build-essential` for building some of our dependencies.
-On linux: `sudo apt-get install cmake build-essential`
+> **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and the FFmpeg development libraries). On Linux, run:
+> `sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
+
- [aloha](https://github.com/huggingface/gym-aloha)
- [xarm](https://github.com/huggingface/gym-xarm)
- [pusht](https://github.com/huggingface/gym-pusht)
For instance, to install 🤗 LeRobot with aloha and pusht, use:
+
```bash
pip install -e ".[aloha, pusht]"
```
To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
+
```bash
wandb login
```
(note: you will also need to enable WandB in the configuration. See below.)
-## Walkthrough
-
-```
-.
-├── examples # contains demonstration examples, start here to learn about LeRobot
-| └── advanced # contains even more examples for those who have mastered the basics
-├── lerobot
-| ├── configs # contains hydra yaml files with all options that you can override in the command line
-| | ├── default.yaml # selected by default, it loads pusht environment and diffusion policy
-| | ├── env # various sim environments and their datasets: aloha.yaml, pusht.yaml, xarm.yaml
-| | └── policy # various policies: act.yaml, diffusion.yaml, tdmpc.yaml
-| ├── common # contains classes and utilities
-| | ├── datasets # various datasets of human demonstrations: aloha, pusht, xarm
-| | ├── envs # various sim environments: aloha, pusht, xarm
-| | ├── policies # various policies: act, diffusion, tdmpc
-| | ├── robot_devices # various real devices: dynamixel motors, opencv cameras, koch robots
-| | └── utils # various utilities
-| └── scripts # contains functions to execute via command line
-| ├── eval.py # load policy and evaluate it on an environment
-| ├── train.py # train a policy via imitation learning and/or reinforcement learning
-| ├── control_robot.py # teleoperate a real robot, record data, run a policy
-| ├── push_dataset_to_hub.py # convert your dataset into LeRobot dataset format and upload it to the Hugging Face hub
-| └── visualize_dataset.py # load a dataset and render its demonstrations
-├── outputs # contains results of scripts execution: logs, videos, model checkpoints
-└── tests # contains pytest utilities for continuous integration
-```
-
### Visualize datasets
Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub.
You can also locally visualize episodes from a dataset on the hub by executing our script from the command line:
+
```bash
-python lerobot/scripts/visualize_dataset.py \
+python -m lerobot.scripts.visualize_dataset \
--repo-id lerobot/pusht \
--episode-index 0
```
or from a dataset in a local folder with the `root` option and the `--local-files-only` flag (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`):
+
```bash
-python lerobot/scripts/visualize_dataset.py \
+python -m lerobot.scripts.visualize_dataset \
--repo-id lerobot/pusht \
--root ./my_local_data_dir \
--local-files-only 1 \
--episode-index 0
```
-
It will open `rerun.io` and display the camera streams, robot states and actions, like this:
https://github-production-user-asset-6210df.s3.amazonaws.com/4681518/328035972-fd46b787-b532-47e2-bb6f-fd536a55a7ed.mov?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240505%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240505T172924Z&X-Amz-Expires=300&X-Amz-Signature=d680b26c532eeaf80740f08af3320d22ad0b8a4e4da1bcc4f33142c15b509eda&X-Amz-SignedHeaders=host&actor_id=24889239&key_id=0&repo_id=748713144
-
-Our script can also visualize datasets stored on a distant server. See `python lerobot/scripts/visualize_dataset.py --help` for more instructions.
+Our script can also visualize datasets stored on a remote server. See `python -m lerobot.scripts.visualize_dataset --help` for more instructions.
### The `LeRobotDataset` format
A dataset in `LeRobotDataset` format is very simple to use. It can be loaded from a repository on the Hugging Face hub or a local folder simply with e.g. `dataset = LeRobotDataset("lerobot/aloha_static_coffee")` and can be indexed into like any Hugging Face and PyTorch dataset. For instance `dataset[0]` will retrieve a single temporal frame from the dataset containing observation(s) and an action as PyTorch tensors ready to be fed to a model.
-A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}` one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, and the indexed frame itself (corresponding to the 0 entry). See example [1_load_lerobot_dataset.py](examples/1_load_lerobot_dataset.py) for more details on `delta_timestamps`.
+A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}` one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, and the indexed frame itself (corresponding to the 0 entry). See example [1_load_lerobot_dataset.py](examples/1_load_lerobot_dataset.py) for more details on `delta_timestamps`.
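+
+For illustration, here is a minimal sketch (the dataset and camera key are just examples; `lerobot/pusht` exposes an `observation.image` camera key):
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+# Retrieve, for each indexed frame, 3 earlier camera frames (1s, 0.5s and 0.2s before) plus the frame itself.
+delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}
+dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
+
+item = dataset[0]
+# "observation.image" now has an extra leading dimension of size 4, one entry per requested timestamp.
+print(item["observation.image"].shape)
+```
+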
Under the hood, the `LeRobotDataset` format makes use of several ways to serialize data, which can be useful to understand if you plan to work more closely with this format. We tried to make a flexible yet simple dataset format that covers most types of features and specificities present in reinforcement learning and robotics, in simulation and in the real world, with a focus on cameras and robot states, but easily extended to other types of sensory inputs as long as they can be represented by a tensor.
@@ -191,7 +229,7 @@ dataset attributes:
│ ├ episode_index (int64): index of the episode for this sample
│ ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode
│ ├ timestamp (float32): timestamp in the episode
- │ ├ next.done (bool): indicates the end of en episode ; True for the last frame in each episode
+ │ ├ next.done (bool): indicates the end of an episode ; True for the last frame in each episode
│ └ index (int64): general index in the whole dataset
├ episode_data_index: contains 2 tensors with the start and end indices of each episode
│ ├ from (1D int64 tensor): first frame index for each episode — shape (num episodes,) starts with 0
@@ -209,100 +247,65 @@ dataset attributes:
```
A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely:
+
- hf_dataset stored using Hugging Face datasets library serialization to parquet
- videos are stored in mp4 format to save space
- metadata are stored in plain json/jsonl files
-Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can use the `local_files_only` argument and specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location.
+Datasets can be uploaded/downloaded from the Hugging Face hub seamlessly. To work on a local dataset, you can specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location.
### Evaluate a pretrained policy
Check out [example 2](./examples/2_evaluate_pretrained_policy.py) that illustrates how to download a pretrained policy from Hugging Face hub, and run an evaluation on its corresponding environment.
We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht):
+
```bash
-python lerobot/scripts/eval.py \
- -p lerobot/diffusion_pusht \
- eval.n_episodes=10 \
- eval.batch_size=10
+python -m lerobot.scripts.eval \
+ --policy.path=lerobot/diffusion_pusht \
+ --env.type=pusht \
+ --eval.batch_size=10 \
+ --eval.n_episodes=10 \
+ --policy.use_amp=false \
+ --policy.device=cuda
```
Note: After training your own policy, you can re-evaluate the checkpoints with:
```bash
-python lerobot/scripts/eval.py -p {OUTPUT_DIR}/checkpoints/last/pretrained_model
+python -m lerobot.scripts.eval --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model
```
-See `python lerobot/scripts/eval.py --help` for more instructions.
+See `python -m lerobot.scripts.eval --help` for more instructions.
### Train your own policy
Check out [example 3](./examples/3_train_policy.py), which illustrates how to train a model using our core library in Python, and [example 4](./examples/4_train_policy_with_script.md), which shows how to use our training script from the command line.
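+
+In general, you can use our training script to easily train any policy. As a rough sketch (the policy/environment/dataset combination and output directory below are just examples, mirroring the test targets in the Makefile; see [example 4](./examples/4_train_policy_with_script.md) for the full set of options):
+
+```bash
+python -m lerobot.scripts.train \
+  --policy.type=act \
+  --policy.device=cuda \
+  --policy.push_to_hub=false \
+  --env.type=aloha \
+  --env.task=AlohaInsertion-v0 \
+  --dataset.repo_id=lerobot/aloha_sim_insertion_human \
+  --output_dir=outputs/train/act_aloha_insertion
+```
+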
-In general, you can use our training script to easily train any policy. Here is an example of training the ACT policy on trajectories collected by humans on the Aloha simulation environment for the insertion task:
-
-```bash
-python lerobot/scripts/train.py \
- policy=act \
- env=aloha \
- env.task=AlohaInsertion-v0 \
- dataset_repo_id=lerobot/aloha_sim_insertion_human \
-```
-
-The experiment directory is automatically generated and will show up in yellow in your terminal. It looks like `outputs/train/2024-05-05/20-21-12_aloha_act_default`. You can manually specify an experiment directory by adding this argument to the `train.py` python command:
-```bash
- hydra.run.dir=your/new/experiment/dir
-```
-
-In the experiment directory there will be a folder called `checkpoints` which will have the following structure:
-
-```bash
-checkpoints
-├── 000250 # checkpoint_dir for training step 250
-│ ├── pretrained_model # Hugging Face pretrained model dir
-│ │ ├── config.json # Hugging Face pretrained model config
-│ │ ├── config.yaml # consolidated Hydra config
-│ │ ├── model.safetensors # model weights
-│ │ └── README.md # Hugging Face model card
-│ └── training_state.pth # optimizer/scheduler/rng state and training step
-```
-
-To resume training from a checkpoint, you can add these to the `train.py` python command:
-```bash
- hydra.run.dir=your/original/experiment/dir resume=true
-```
-
-It will load the pretrained model, optimizer and scheduler states for training. For more information please see our tutorial on training resumption [here](https://github.com/huggingface/lerobot/blob/main/examples/5_resume_training.md).
+To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding `--wandb.enable=true`.
-To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding:
-
-```bash
- wandb.enable=true
-```
-
-A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs.
+A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](./examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs.

-Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions.
+Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python -m lerobot.scripts.eval --help` for more instructions.
#### Reproduce state-of-the-art (SOTA)
-We have organized our configuration files (found under [`lerobot/configs`](./lerobot/configs)) such that they reproduce SOTA results from a given model variant in their respective original works. Simply running:
+We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performance.
+You can reproduce their training by loading the config from their run. Simply running:
```bash
-python lerobot/scripts/train.py policy=diffusion env=pusht
+python -m lerobot.scripts.train --config_path=lerobot/diffusion_pusht
```
reproduces SOTA results for Diffusion Policy on the PushT task.
-Pretrained policies, along with reproduction details, can be found under the "Models" section of https://huggingface.co/lerobot.
-
## Contribute
If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md).
-### Add a new dataset
+
### Add a pretrained policy
Once you have trained a policy you may upload it to the Hugging Face hub using a hub id that looks like `${hf_user}/${repo_name}` (e.g. [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)).
You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain:
+
- `config.json`: A serialized version of the policy configuration (following the policy's dataclass config).
- `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format.
-- `config.yaml`: A consolidated Hydra training configuration containing the policy, environment, and dataset configs. The policy configuration should match `config.json` exactly. The environment config is useful for anyone who wants to evaluate your policy. The dataset config just serves as a paper trail for reproducibility.
+- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility.
To upload these to the hub, run the following:
+
```bash
huggingface-cli upload ${hf_user}/${repo_name} path/to/pretrained_model
```
See [eval.py](https://github.com/huggingface/lerobot/blob/main/lerobot/scripts/eval.py) for an example of how other people may use your policy.
-
### Improve your code with profiling
An example of a code snippet to profile the evaluation of a policy:
+
+
```python
from torch.profiler import profile, record_function, ProfilerActivity
@@ -363,13 +368,15 @@ with profile(
prof.step()
# insert code to profile, potentially whole body of eval_policy function
```
+
## Citation
If you want, you can cite this work with:
+
```bibtex
@misc{cadene2024lerobot,
- author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Wolf, Thomas},
+ author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Palma, Steven and Kooijmans, Pepijn and Aractingi, Michel and Shukor, Mustafa and Aubakirova, Dana and Russi, Martino and Capuano, Francesco and Pascale, Caroline and Choghari, Jade and Moss, Jess and Wolf, Thomas},
title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch},
howpublished = "\url{https://github.com/huggingface/lerobot}",
year = {2024}
@@ -378,7 +385,19 @@ If you want, you can cite this work with:
Additionally, if you are using any of the particular policy architectures, pretrained models, or datasets, it is recommended to cite the original authors of the work as they appear below:
+- [SmolVLA](https://arxiv.org/abs/2506.01844)
+
+```bibtex
+@article{shukor2025smolvla,
+ title={SmolVLA: A Vision-Language-Action Model for Affordable and Efficient Robotics},
+ author={Shukor, Mustafa and Aubakirova, Dana and Capuano, Francesco and Kooijmans, Pepijn and Palma, Steven and Zouitine, Adil and Aractingi, Michel and Pascal, Caroline and Russi, Martino and Marafioti, Andres and Alibert, Simon and Cord, Matthieu and Wolf, Thomas and Cadene, Remi},
+ journal={arXiv preprint arXiv:2506.01844},
+ year={2025}
+}
+```
+
- [Diffusion Policy](https://diffusion-policy.cs.columbia.edu)
+
```bibtex
@article{chi2024diffusionpolicy,
author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
@@ -387,7 +406,9 @@ Additionally, if you are using any of the particular policy architecture, pretra
year = {2024},
}
```
+
- [ACT or ALOHA](https://tonyzhaozh.github.io/aloha)
+
```bibtex
@article{zhao2023learning,
title={Learning fine-grained bimanual manipulation with low-cost hardware},
@@ -409,6 +430,7 @@ Additionally, if you are using any of the particular policy architecture, pretra
```
- [VQ-BeT](https://sjlee.cc/vq-bet/)
+
```bibtex
@article{lee2024behavior,
title={Behavior generation with latent actions},
@@ -417,3 +439,20 @@ Additionally, if you are using any of the particular policy architecture, pretra
year={2024}
}
```
+
+- [HIL-SERL](https://hil-serl.github.io/)
+
+```bibtex
+@Article{luo2024hilserl,
+title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning},
+author={Jianlan Luo and Charles Xu and Jeffrey Wu and Sergey Levine},
+year={2024},
+eprint={2410.21845},
+archivePrefix={arXiv},
+primaryClass={cs.RO}
+}
+```
+
+## Star History
+
+[](https://star-history.com/#huggingface/lerobot&Timeline)
diff --git a/benchmarks/video/README.md b/benchmarks/video/README.md
index 56cd1d1e22..490a4b4950 100644
--- a/benchmarks/video/README.md
+++ b/benchmarks/video/README.md
@@ -1,28 +1,32 @@
# Video benchmark
-
## Questions
+
What is the optimal trade-off between:
+
- minimizing loading time with random access,
- minimizing memory space on disk,
- maximizing success rate of policies,
- compatibility across devices/platforms for decoding videos (e.g. video players, web browsers).
How to encode videos?
+
- Which video codec (`-vcodec`) to use? h264, h265, AV1?
- What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
- How much compression (`-crf`)? No compression with `0`, intermediate compression with `25` or extreme with `50+`?
- Which frequency to choose for key frames (`-g`)? A key frame every `10` frames?
How to decode videos?
+
- Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
- What scenarios to use for the requesting timestamps during benchmark? (`timestamps_mode`)
-
## Variables
+
**Image content & size**
We don't expect the same optimal settings for a dataset of images from a simulation, or from the real world in an apartment, in a factory, outdoors, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
For these reasons, we run this benchmark on four representative datasets:
+
- `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
- `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera.
@@ -34,8 +38,9 @@ Note: The datasets used for this benchmark need to be image datasets, not video
We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).
### Encoding parameters
+
| parameter | values |
-|-------------|--------------------------------------------------------------|
+| ----------- | ------------------------------------------------------------ |
| **vcodec** | `libx264`, `libx265`, `libsvtav1` |
| **pix_fmt** | `yuv444p`, `yuv420p` |
| **g** | `1`, `2`, `3`, `4`, `5`, `6`, `10`, `15`, `20`, `40`, `None` |
@@ -44,19 +49,23 @@ We might revisit this benchmark and find better settings if we train our policie
Note that `crf` value might be interpreted differently by various video codecs. In other words, the same value used with one codec doesn't necessarily translate into the same compression level with another codec. In fact, the default value (`None`) isn't the same amongst the different video codecs. Importantly, it is also the case for many other ffmpeg arguments like `g` which specifies the frequency of the key frames.
For a comprehensive list and documentation of these parameters, see the ffmpeg documentation depending on the video codec used:
+
- h264: https://trac.ffmpeg.org/wiki/Encode/H.264
- h265: https://trac.ffmpeg.org/wiki/Encode/H.265
- AV1: https://trac.ffmpeg.org/wiki/Encode/AV1
### Decoding parameters
+
**Decoder**
We tested two video decoding backends from torchvision:
-- `pyav` (default)
+
+- `pyav`
- `video_reader` (requires building torchvision from source)
**Requested timestamps**
Given the way video decoding works, once a keyframe has been loaded, the decoding of subsequent frames is fast.
This of course is affected by the `-g` parameter during encoding, which specifies the frequency of the keyframes. Given our typical use cases in robotics policies which might request a few timestamps in different random places, we want to replicate these use cases with the following scenarios:
+
- `1_frame`: 1 frame,
- `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`),
- `6_frames`: 6 consecutive frames (e.g. `[t + i / fps for i in range(6)]`)
@@ -64,12 +73,13 @@ This of course is affected by the `-g` parameter during encoding, which specifie
Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`.
Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario:
+
- `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g `[t, t + 5 / fps]`),
However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded.
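+
+For illustration, a rough sketch of requesting such timestamps with the torchvision-based helper used in this benchmark (the path and fps are placeholders, and the exact signature may differ in your version):
+
+```python
+from lerobot.datasets.video_utils import decode_video_frames_torchvision
+
+fps = 30
+t = 1.0  # arbitrary starting timestamp, in seconds
+# "2_frames_4_space": two frames with 4 frames of spacing in between
+timestamps = [t, t + 5 / fps]
+
+frames = decode_video_frames_torchvision(
+    video_path="path/to/episode_000000.mp4",  # placeholder path
+    timestamps=timestamps,
+    tolerance_s=1 / (fps * 2),  # how far a decoded frame may be from a requested timestamp
+    backend="pyav",
+)
+```
+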
-
## Metrics
+
**Data compression ratio (lower is better)**
`video_images_size_ratio` is the ratio of the memory space on disk taken by the encoded video over the memory space taken by the original images. For instance, `video_images_size_ratio=25%` means that the video takes 4 times less memory space on disk compared to the original images.
@@ -87,18 +97,18 @@ However, due to how video decoding is implemented with `pyav`, we don't have acc
One aspect that can't be measured here with those metrics is the compatibility of the encoding across platforms, in particular on web browser, for visualization purposes.
h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility:
+
- `yuv420p` is more widely supported across various platforms, including web browsers.
- `yuv444p` offers higher color fidelity but might not be supported as broadly.
-
-
## How the benchmark works
+
The benchmark evaluates both encoding and decoding of video frames on the first episode of each dataset.
**Encoding:** for each `vcodec` and `pix_fmt` pair, we start from default values for `g` and `crf` and change a single one of them (either `g` or `crf`) to each of the specified values (we don't test every combination, as this would be computationally too heavy).
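+
+For reference, a single encoding configuration corresponds roughly to an ffmpeg invocation like this hypothetical sketch (using the parameter values selected further below; the benchmark builds the actual command programmatically via `encode_video_frames`):
+
+```bash
+# hypothetical sketch: encode a folder of extracted PNG frames with one parameter combination
+ffmpeg -r 30 -i frames/frame_%06d.png \
+    -vcodec libsvtav1 -pix_fmt yuv420p -g 2 -crf 30 \
+    episode_0.mp4
+```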
@@ -110,15 +120,18 @@ Intermediate results saved for each `vcodec` and `pix_fmt` combination in csv ta
These are then all concatenated to a single table ready for analysis.
## Caveats
+
We tried to measure the most impactful parameters for both encoding and decoding. However, for computational reasons we can't test out every combination.
Additional encoding parameters exist that are not included in this benchmark. In particular:
+
- `-preset` which allows for selecting encoding presets. This represents a collection of options that trade off encoding speed against compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1.
-- `-tune` which allows to optimize the encoding for certains aspects (e.g. film quality, fast decoding, etc.).
+- `-tune` which allows to optimize the encoding for certain aspects (e.g. film quality, fast decoding, etc.).
See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.
Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
+
- `torchaudio`
- `ffmpegio`
- `decord`
@@ -127,16 +140,17 @@ Similarly on the decoding side, other decoders exist but are not implemented in
Note as well that since we are mostly interested in the performance at decoding time (also because encoding is done only once before uploading a dataset), we did not measure encoding times nor have any metrics regarding encoding.
However, besides the necessity to build ffmpeg from source, encoding did not pose any issue and it didn't take a significant amount of time during this benchmark.
-
## Install
+
Building ffmpeg from source is required to include libx265 and libaom/libsvtav1 (av1) video codecs ([compilation guide](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu)).
**Note:** While you still need to build torchvision with a conda-installed `ffmpeg<4.3` to use the `video_reader` decoder (as described in [#220](https://github.com/huggingface/lerobot/pull/220)), you also need another version which is custom-built with all the video codecs for encoding. For the script to then use that version, you can prepend the command above with `PATH="$HOME/bin:$PATH"`, which is where ffmpeg should be built.
-
## Adding a video decoder
+
Right now, we're only benchmarking the two video decoders available with torchvision: `pyav` and `video_reader`.
You can easily add a new decoder to benchmark by adding it to this function in the script:
+
```diff
def decode_video_frames(
video_path: str,
@@ -156,9 +170,10 @@ def decode_video_frames(
raise NotImplementedError(backend)
```
-
## Example
+
For a quick run, you can try these parameters:
+
```bash
python benchmark/video/run_video_benchmark.py \
--output-dir outputs/video_benchmark \
@@ -176,11 +191,12 @@ python benchmark/video/run_video_benchmark.py \
--save-frames 0
```
-
## Results
### Reproduce
+
We ran the benchmark with the following parameters:
+
```bash
# h264 and h265 encodings
python benchmark/video/run_video_benchmark.py \
@@ -221,9 +237,10 @@ python benchmark/video/run_video_benchmark.py \
The full results are available [here](https://docs.google.com/spreadsheets/d/1OYJB43Qu8fC26k_OyoMFgGBBKfQRCi4BIuYitQnq3sw/edit?usp=sharing)
-
### Parameters selected for LeRobotDataset
+
Considering these results, we chose what we think is the best set of encoding parameters:
+
- vcodec: `libsvtav1`
- pix-fmt: `yuv420p`
- g: `2`
@@ -236,7 +253,7 @@ Since we're using av1 encoding, we're choosing the `pyav` decoder as `video_read
These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav`
| video_images_size_ratio | vcodec | pix_fmt | | | |
-|------------------------------------|------------|---------|-----------|-----------|-----------|
+| ---------------------------------- | ---------- | ------- | --------- | --------- | --------- |
| | libx264 | | libx265 | | libsvtav1 |
| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
| lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% |
@@ -245,7 +262,7 @@ These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_
| aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% |
| video_images_load_time_ratio | vcodec | pix_fmt | | | |
-|------------------------------------|---------|---------|----------|---------|-----------|
+| ---------------------------------- | ------- | ------- | -------- | ------- | --------- |
| | libx264 | | libx265 | | libsvtav1 |
| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
| lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 |
@@ -254,7 +271,7 @@ These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_
| aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** |
| | | vcodec | pix_fmt | | | |
-|------------------------------------|----------|----------|--------------|----------|-----------|--------------|
+| ---------------------------------- | -------- | -------- | ------------ | -------- | --------- | ------------ |
| | | libx264 | | libx265 | | libsvtav1 |
| repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
| lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 |
diff --git a/benchmarks/video/capture_camera_feed.py b/benchmarks/video/capture_camera_feed.py
old mode 100644
new mode 100755
index 3b4c356a82..8f8530532d
--- a/benchmarks/video/capture_camera_feed.py
+++ b/benchmarks/video/capture_camera_feed.py
@@ -17,12 +17,21 @@
import argparse
import datetime as dt
+import os
+import time
from pathlib import Path
import cv2
+import rerun as rr
+# see https://rerun.io/docs/howto/visualization/limit-ram
+RERUN_MEMORY_LIMIT = os.getenv("LEROBOT_RERUN_MEMORY_LIMIT", "5%")
+
+
+def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int, duration: int):
+ rr.init("lerobot_capture_camera_feed")
+ rr.spawn(memory_limit=RERUN_MEMORY_LIMIT)
-def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int):
now = dt.datetime.now()
capture_dir = output_dir / f"{now:%Y-%m-%d}" / f"{now:%H-%M-%S}"
if not capture_dir.exists():
@@ -39,24 +48,21 @@ def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
frame_index = 0
- while True:
+ start_time = time.time()
+ while time.time() - start_time < duration:
ret, frame = cap.read()
if not ret:
print("Error: Could not read frame.")
break
-
- cv2.imshow("Video Stream", frame)
+ rr.log("video/stream", rr.Image(frame), static=True)
cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame)
frame_index += 1
- # Break the loop on 'q' key press
- if cv2.waitKey(1) & 0xFF == ord("q"):
- break
-
- # Release the capture and destroy all windows
+ # Release the capture
cap.release()
- cv2.destroyAllWindows()
+
+ # TODO(Steven): Add a graceful shutdown via a close() method for the Viewer context, though not currently supported in the Rerun API.
if __name__ == "__main__":
@@ -86,5 +92,11 @@ def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height
default=720,
help="Height of the captured images.",
)
+ parser.add_argument(
+ "--duration",
+ type=int,
+ default=20,
+ help="Duration in seconds for which the video stream should be captured.",
+ )
args = parser.parse_args()
display_and_save_video_stream(**vars(args))
diff --git a/benchmarks/video/run_video_benchmark.py b/benchmarks/video/run_video_benchmark.py
index e906648723..bababf636c 100644
--- a/benchmarks/video/run_video_benchmark.py
+++ b/benchmarks/video/run_video_benchmark.py
@@ -35,12 +35,12 @@
from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
from tqdm import tqdm
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.common.datasets.video_utils import (
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.video_utils import (
decode_video_frames_torchvision,
encode_video_frames,
)
-from lerobot.common.utils.benchmark import TimeBenchmark
+from lerobot.utils.benchmark import TimeBenchmark
BASE_ENCODING = OrderedDict(
[
@@ -67,7 +67,7 @@ def parse_int_or_none(value) -> int | None:
def check_datasets_formats(repo_ids: list) -> None:
for repo_id in repo_ids:
dataset = LeRobotDataset(repo_id)
- if dataset.video:
+ if len(dataset.meta.video_keys) > 0:
raise ValueError(
f"Use only image dataset for running this benchmark. Video dataset provided: {repo_id}"
)
@@ -416,7 +416,7 @@ def main(
"--vcodec",
type=str,
nargs="*",
- default=["libx264", "libx265", "libsvtav1"],
+ default=["libx264", "hevc", "libsvtav1"],
help="Video codecs to be tested",
)
parser.add_argument(
@@ -446,7 +446,7 @@ def main(
# nargs="*",
# default=[0, 1],
# help="Use the fastdecode tuning option. 0 disables it. "
- # "For libx264 and libx265, only 1 is possible. "
+ # "For libx264 and libx265/hevc, only 1 is possible. "
# "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization",
# )
parser.add_argument(
diff --git a/docker/Dockerfile.internal b/docker/Dockerfile.internal
new file mode 100644
index 0000000000..0516064497
--- /dev/null
+++ b/docker/Dockerfile.internal
@@ -0,0 +1,60 @@
+# Dockerfile.internal
+# This Dockerfile is designed for HuggingFace internal CI environments
+# that require GPU access. It starts from an NVIDIA CUDA base image.
+
+# docker build -f docker/Dockerfile.internal -t lerobot-ci .
+
+# Configure the base image for CI with GPU access
+ARG CUDA_VERSION=12.9.1
+ARG OS_VERSION=24.04
+FROM nvidia/cuda:${CUDA_VERSION}-base-ubuntu${OS_VERSION}
+
+# Define Python version argument
+ARG PYTHON_VERSION=3.10
+
+# Configure environment variables
+ENV DEBIAN_FRONTEND=noninteractive \
+ MUJOCO_GL="egl" \
+ PATH="/lerobot/.venv/bin:$PATH"
+
+# Install Python, system dependencies, and uv (as root)
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ software-properties-common \
+ build-essential git curl \
+ libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
+ libusb-1.0-0-dev \
+ speech-dispatcher libgeos-dev \
+ && add-apt-repository -y ppa:deadsnakes/ppa \
+ && apt-get update \
+ && apt-get install -y --no-install-recommends \
+ python${PYTHON_VERSION} \
+ python${PYTHON_VERSION}-venv \
+ python${PYTHON_VERSION}-dev \
+ && curl -LsSf https://astral.sh/uv/install.sh | sh \
+ && mv /root/.local/bin/uv /usr/local/bin/uv \
+ && useradd --create-home --shell /bin/bash user_lerobot \
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Create application directory and set permissions
+WORKDIR /lerobot
+RUN chown -R user_lerobot:user_lerobot /lerobot
+
+# Switch to the non-root user
+USER user_lerobot
+
+# Create the virtual environment
+# We use a virtual environment inside the container—even though the container itself \
+# provides isolation—to ensure compatibility with the cluster and to prevent \
+# issues with MuJoCo and OpenGL drivers.
+RUN uv venv --python python${PYTHON_VERSION}
+
+# Install Python dependencies for caching
+COPY --chown=user_lerobot:user_lerobot pyproject.toml README.md ./
+COPY --chown=user_lerobot:user_lerobot src/ src/
+RUN uv pip install --no-cache ".[all]"
+
+# Copy the rest of the application source code
+COPY --chown=user_lerobot:user_lerobot . .
+
+# Set the default command
+CMD ["/bin/bash"]
diff --git a/docker/Dockerfile.user b/docker/Dockerfile.user
new file mode 100644
index 0000000000..ce63f55307
--- /dev/null
+++ b/docker/Dockerfile.user
@@ -0,0 +1,50 @@
+# Dockerfile.user
+# This Dockerfile is designed for a lerobot user who wants to
+# experiment with the project. It starts from a Python slim base image.
+
+# docker build -f docker/Dockerfile.user -t lerobot-user .
+# docker run -it --rm lerobot-user
+
+# Configure the base image
+ARG PYTHON_VERSION=3.10
+FROM python:${PYTHON_VERSION}-slim
+
+# Configure environment variables
+ENV DEBIAN_FRONTEND=noninteractive \
+ MUJOCO_GL="egl" \
+ PATH="/lerobot/.venv/bin:$PATH"
+
+# Install system dependencies and uv (as root)
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential git curl \
+ libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
+ libusb-1.0-0-dev \
+ speech-dispatcher libgeos-dev \
+ && curl -LsSf https://astral.sh/uv/install.sh | sh \
+ && mv /root/.local/bin/uv /usr/local/bin/uv \
+ && useradd --create-home --shell /bin/bash user_lerobot \
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Create application directory and set permissions
+WORKDIR /lerobot
+RUN chown -R user_lerobot:user_lerobot /lerobot
+
+# Switch to the non-root user
+USER user_lerobot
+
+# Create the virtual environment
+# We use a virtual environment inside the container—even though the container itself \
+# provides isolation—to closely resemble local development and allow users to \
+# run other Python projects in the same container without dependency conflicts.
+RUN uv venv
+
+# Install Python dependencies for caching
+COPY --chown=user_lerobot:user_lerobot pyproject.toml README.md ./
+COPY --chown=user_lerobot:user_lerobot src/ src/
+RUN uv pip install --no-cache ".[all]"
+
+# Copy the rest of the application code
+COPY --chown=user_lerobot:user_lerobot . .
+
+# Set the default command
+CMD ["/bin/bash"]
diff --git a/docker/lerobot-cpu/Dockerfile b/docker/lerobot-cpu/Dockerfile
index 34f5361a84..85c31ac1a7 100644
--- a/docker/lerobot-cpu/Dockerfile
+++ b/docker/lerobot-cpu/Dockerfile
@@ -1,32 +1,29 @@
# Configure image
ARG PYTHON_VERSION=3.10
-
FROM python:${PYTHON_VERSION}-slim
+
+# Configure environment variables
ARG PYTHON_VERSION
-ARG DEBIAN_FRONTEND=noninteractive
+ENV DEBIAN_FRONTEND=noninteractive
+ENV MUJOCO_GL="egl"
+ENV PATH="/opt/venv/bin:$PATH"
-# Install apt dependencies
+# Install dependencies and set up Python in a single layer
RUN apt-get update && apt-get install -y --no-install-recommends \
- build-essential cmake \
+ build-essential cmake git \
libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
- speech-dispatcher \
- && apt-get clean && rm -rf /var/lib/apt/lists/*
+ speech-dispatcher libgeos-dev \
+ && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
+ && python -m venv /opt/venv \
+ && apt-get clean && rm -rf /var/lib/apt/lists/* \
+ && echo "source /opt/venv/bin/activate" >> /root/.bashrc
-# Create virtual environment
-RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
-RUN python -m venv /opt/venv
-ENV PATH="/opt/venv/bin:$PATH"
-RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc
-
-# Install LeRobot
+# Copy repository and install LeRobot in a single layer
COPY . /lerobot
WORKDIR /lerobot
-RUN pip install --upgrade --no-cache-dir pip
-RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
- --extra-index-url https://download.pytorch.org/whl/cpu
-
-# Set EGL as the rendering backend for MuJoCo
-ENV MUJOCO_GL="egl"
+RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
+ && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, smolvla]" \
+ --extra-index-url https://download.pytorch.org/whl/cpu
# Execute in bash shell rather than python
CMD ["/bin/bash"]
diff --git a/docker/lerobot-gpu-dev/Dockerfile b/docker/lerobot-gpu-dev/Dockerfile
index ca0abb1b5a..4d25b25506 100644
--- a/docker/lerobot-gpu-dev/Dockerfile
+++ b/docker/lerobot-gpu-dev/Dockerfile
@@ -13,8 +13,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
sed gawk grep curl wget zip unzip \
tcpdump sysstat screen tmux \
libglib2.0-0 libgl1-mesa-glx libegl1-mesa \
- speech-dispatcher portaudio19-dev \
- python${PYTHON_VERSION} python${PYTHON_VERSION}-venv \
+ speech-dispatcher portaudio19-dev libgeos-dev \
+ python${PYTHON_VERSION} python${PYTHON_VERSION}-venv python${PYTHON_VERSION}-dev \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# Install ffmpeg build dependencies. See:
@@ -58,7 +58,7 @@ RUN (type -p wget >/dev/null || (apt update && apt-get install wget -y)) \
RUN ln -s /usr/bin/python3 /usr/bin/python
# Install poetry
-RUN curl -sSL https://install.python-poetry.org | python - --version 1.8.5
+RUN curl -sSL https://install.python-poetry.org | python -
ENV PATH="/root/.local/bin:$PATH"
RUN echo 'if [ "$HOME" != "/root" ]; then ln -sf /root/.local/bin/poetry $HOME/.local/bin/poetry; fi' >> /root/.bashrc
RUN poetry config virtualenvs.create false
diff --git a/docker/lerobot-gpu/Dockerfile b/docker/lerobot-gpu/Dockerfile
index 92640cf4b4..746ea29b7c 100644
--- a/docker/lerobot-gpu/Dockerfile
+++ b/docker/lerobot-gpu/Dockerfile
@@ -1,30 +1,24 @@
FROM nvidia/cuda:12.4.1-base-ubuntu22.04
-# Configure image
+# Configure environment variables
ARG PYTHON_VERSION=3.10
-ARG DEBIAN_FRONTEND=noninteractive
-
+ENV DEBIAN_FRONTEND=noninteractive
+ENV MUJOCO_GL="egl"
+ENV PATH="/opt/venv/bin:$PATH"
-# Install apt dependencies
+# Install dependencies and set up Python in a single layer
RUN apt-get update && apt-get install -y --no-install-recommends \
- build-essential cmake \
+ build-essential cmake git \
libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
- speech-dispatcher \
+ speech-dispatcher libgeos-dev \
python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
- && apt-get clean && rm -rf /var/lib/apt/lists/*
-
+ && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
+ && python -m venv /opt/venv \
+ && apt-get clean && rm -rf /var/lib/apt/lists/* \
+ && echo "source /opt/venv/bin/activate" >> /root/.bashrc
-# Create virtual environment
-RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
-RUN python -m venv /opt/venv
-ENV PATH="/opt/venv/bin:$PATH"
-RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc
-
-# Install LeRobot
+# Copy repository and install LeRobot in a single layer
COPY . /lerobot
WORKDIR /lerobot
-RUN pip install --upgrade --no-cache-dir pip
-RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"
-
-# Set EGL as the rendering backend for MuJoCo
-ENV MUJOCO_GL="egl"
+RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
+ && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel, smolvla]"
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000..967de7b841
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,139 @@
+
+
+# Generating the documentation
+
+To generate the documentation, you first have to build it. Several packages are necessary to build the docs;
+you can install them with the following command at the root of the code repository:
+
+```bash
+pip install -e ".[docs]"
+```
+
+You will also need `nodejs`. Please refer to their [installation page](https://nodejs.org/en/download).
+
+---
+
+**NOTE**
+
+You only need to generate the documentation to inspect it locally (if you're planning changes and want to
+check how they look before committing for instance). You don't have to `git commit` the built documentation.
+
+---
+
+## Building the documentation
+
+Once you have set up `doc-builder` and the additional packages, you can generate the documentation by
+typing the following command:
+
+```bash
+doc-builder build lerobot docs/source/ --build_dir ~/tmp/test-build
+```
+
+You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
+the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
+Markdown editor.
+
+## Previewing the documentation
+
+To preview the docs, first install the `watchdog` module with:
+
+```bash
+pip install watchdog
+```
+
+Then run the following command:
+
+```bash
+doc-builder preview lerobot docs/source/
+```
+
+The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.
+
+---
+
+**NOTE**
+
+The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` and restart the `preview` command (`ctrl-c` to stop it and call `doc-builder preview ...` again).
+
+---
+
+## Adding a new element to the navigation bar
+
+Accepted files are Markdown (.md).
+
+Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
+the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/lerobot/blob/main/docs/source/_toctree.yml) file.
+
+## Renaming section headers and moving sections
+
+It helps to keep the old links working when renaming a section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and social media, and it makes for a much better user experience if users reading them months later can still easily navigate to the originally intended information.
+
+Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
+
+So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
+
+```
+Sections that were moved:
+
+[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
+```
+
+and of course, if you moved it to another file, then:
+
+```
+Sections that were moved:
+
+[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
+```
+
+Use the relative style to link to the new file so that the versioned docs continue to work.
+
+For an example of a rich moved sections set please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).
+
+### Adding a new tutorial
+
+Adding a new tutorial or section is done in two steps:
+
+- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
+- Link that file in `./source/_toctree.yml` on the correct toc-tree.
+
+Make sure to put your new file under the proper section. If in doubt, feel free to ask in a GitHub Issue or PR.
+
+### Writing source documentation
+
+Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names
+and objects like True, None or any strings should usually be put in `code`.
+
+#### Writing a multi-line code block
+
+Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
+
+````
+```
+# first line of code
+# second line
+# etc
+```
+````
+
+#### Adding an image
+
+Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
+the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
+them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
+If you are an external contributor, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
+to this dataset.
diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml
new file mode 100644
index 0000000000..1af96d79d3
--- /dev/null
+++ b/docs/source/_toctree.yml
@@ -0,0 +1,48 @@
+- sections:
+ - local: index
+ title: LeRobot
+ - local: installation
+ title: Installation
+ title: Get started
+- sections:
+ - local: il_robots
+ title: Imitation Learning for Robots
+ - local: il_sim
+ title: Imitation Learning in Sim
+ - local: cameras
+ title: Cameras
+ - local: integrate_hardware
+ title: Bring Your Own Hardware
+ - local: hilserl
+ title: Train a Robot with RL
+ - local: hilserl_sim
+ title: Train RL in Simulation
+ - local: async
+ title: Use Async Inference
+ title: "Tutorials"
+- sections:
+ - local: smolvla
+ title: Finetune SmolVLA
+ title: "Policies"
+- sections:
+ - local: hope_jr
+ title: Hope Jr
+ - local: so101
+ title: SO-101
+ - local: so100
+ title: SO-100
+ - local: koch
+ title: Koch v1.1
+ - local: lekiwi
+ title: LeKiwi
+ title: "Robots"
+- sections:
+ - local: notebooks
+ title: Notebooks
+ title: "Resources"
+- sections:
+ - local: contributing
+ title: Contribute to LeRobot
+ - local: backwardcomp
+ title: Backward compatibility
+ title: "About"
diff --git a/docs/source/async.mdx b/docs/source/async.mdx
new file mode 100644
index 0000000000..397c513cff
--- /dev/null
+++ b/docs/source/async.mdx
@@ -0,0 +1,312 @@
+# Asynchronous Inference
+
+With our [SmolVLA](https://huggingface.co/papers/2506.01844) we introduced a new way to run inference on real-world robots, **decoupling action prediction from action execution**.
+In this tutorial, we'll show how to run asynchronous inference (_async inference_) using a finetuned version of SmolVLA.
+**Try async inference with all the policies supported by LeRobot!**
+
+**What you'll learn:**
+
+1. Why asynchronous inference matters and how it compares to the more traditional, sequential inference.
+2. How to spin up a `PolicyServer` and connect a `RobotClient`, either from the same machine or over the network.
+3. How to tune key parameters (`actions_per_chunk`, `chunk_size_threshold`) for your robot and policy.
+
+If you get stuck, hop into our [Discord community](https://discord.gg/s3KuuzsPFb)!
+
+In a nutshell: with _async inference_, your robot keeps acting while the policy server is already busy computing the next chunk of actions---eliminating "wait-for-inference" lags and unlocking smoother, more reactive behaviours.
+This is fundamentally different from synchronous inference (sync), where the robot stays idle while the policy computes the next chunk of actions.
+
+---
+
+## Getting started with async inference
+
+You can read more information on asynchronous inference in our [blogpost](https://huggingface.co/blog/async-robot-inference). This guide is designed to help you quickly set up and run asynchronous inference in your environment.
+
+First, install `lerobot` with the `async` extra to get the additional dependencies required to run async inference.
+
+```shell
+pip install -e ".[async]"
+```
+
+Then, spin up a policy server (in one terminal, or on a separate machine), specifying the host address and port for the client to connect to.
+You can start one by running:
+
+```shell
+python src/lerobot/scripts/server/policy_server.py \
+ --host=127.0.0.1 \
+ --port=8080
+```
+
+This will start a policy server listening on `127.0.0.1:8080` (`localhost`, port 8080). At this stage, the policy server is empty: all the information about which policy to run, and with which parameters, is specified during the first handshake with the client. Spin up a client with:
+
+```shell
+python src/lerobot/scripts/server/robot_client.py \
+ --server_address=127.0.0.1:8080 \ # SERVER: the host address and port of the policy server
+ --robot.type=so100_follower \ # ROBOT: your robot type
+ --robot.port=/dev/tty.usbmodem585A0076841 \ # ROBOT: your robot port
+ --robot.id=follower_so100 \ # ROBOT: your robot id, to load calibration file
+ --robot.cameras="{ laptop: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}, phone: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ # POLICY: the cameras used to acquire frames, with keys matching the keys expected by the policy
+ --task="dummy" \ # POLICY: The task to run the policy on (`Fold my t-shirt`). Not necessarily defined for all policies, such as `act`
+ --policy_type=your_policy_type \ # POLICY: the type of policy to run (smolvla, act, etc)
+ --pretrained_name_or_path=user/model \ # POLICY: the model name/path on server to the checkpoint to run (e.g., lerobot/smolvla_base)
+ --policy_device=mps \ # POLICY: the device to run the policy on, on the server
+ --actions_per_chunk=50 \ # POLICY: the number of actions to output at once
+ --chunk_size_threshold=0.5 \ # CLIENT: the threshold for the chunk size before sending a new observation to the server
+ --aggregate_fn_name=weighted_average \ # CLIENT: the function to aggregate actions on overlapping portions
+ --debug_visualize_queue_size=True # CLIENT: whether to visualize the queue size at runtime
+```
+
+In summary, you need to specify instructions for:
+
+- `SERVER`: the address and port of the policy server
+- `ROBOT`: the type of robot to connect to, the port to connect to, and the local `id` of the robot
+- `POLICY`: the type of policy to run, and the model name/path on the server to the checkpoint to run. You also need to specify which device the server should use, and how many actions to output at once (capped at the policy's maximum number of actions).
+- `CLIENT`: the threshold for the chunk size before sending a new observation to the server, and the function to aggregate actions on overlapping portions. Optionally, you can also visualize the queue size at runtime, to help you tune the `CLIENT` parameters.
+
+Importantly,
+
+- `actions_per_chunk` and `chunk_size_threshold` are key parameters to tune for your setup.
+- `aggregate_fn_name` is the function used to aggregate actions on overlapping portions of consecutive chunks. You can either add a new one to the registry of functions or add your own in `robot_client.py` (see [here](NOTE:addlinktoLOC)); a minimal sketch of such a function is shown below.
+- `debug_visualize_queue_size` is a useful tool to tune the `CLIENT` parameters.
+
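+For intuition, here is a hypothetical sketch of a weighted-average aggregation over the overlapping portion of two chunks (the function and argument names are illustrative, not the exact ones registered in `robot_client.py`):
+
+```python
+import numpy as np
+
+def weighted_average_overlap(old_chunk: np.ndarray, new_chunk: np.ndarray, alpha: float = 0.5) -> np.ndarray:
+    """Blend the overlapping actions of two chunks; a larger alpha trusts the newer chunk more."""
+    return (1 - alpha) * old_chunk + alpha * new_chunk
+
+# Example: both chunks predict the same 3 future steps for a 6-DoF arm
+old = np.zeros((3, 6))  # remaining actions from the current chunk
+new = np.ones((3, 6))   # freshly computed actions for the same timesteps
+print(weighted_average_overlap(old, new, alpha=0.7))  # -> 0.7 everywhere
+```
+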
+## Done! You should see your robot moving around by now 😉
+
+## Async vs. synchronous inference
+
+Synchronous inference relies on interleaving action chunk prediction and action execution. This inherently results in _idle frames_: frames where the robot sits still, waiting for the policy to output a new action chunk.
+In turn, execution suffers from visible real-time lags, where the robot simply stops acting because no actions are available.
+With robotics models growing in size, this problem risks becoming only more severe.
+
+_Figure: Synchronous inference makes the robot idle while the policy is computing the next chunk of actions._
+
+To overcome this, we design async inference, a paradigm where action planning and execution are decoupled, resulting in (1) higher adaptability and, most importantly, (2) no idle frames.
+Crucially, with async inference, the next action chunk is computed _before_ the current one is exhausted, resulting in no idleness.
+Higher adaptability is ensured by aggregating the different action chunks on overlapping portions, obtaining an up-to-date plan and a tighter control loop.
+
+
+_Figure: Asynchronous inference results in no idleness because the next chunk is computed before the current chunk is exhausted._
+
+---
+
+## Start the Policy Server
+
+Policy servers are wrappers around a `PreTrainedPolicy`, interfacing it with observations coming from a robot client.
+They are initialized as empty containers and are populated with the requested policy during the initial handshake between the robot client and the policy server.
+As such, spinning up a policy server is as easy as specifying the host address and port. If you're running the policy server on the same machine as the robot client, you can use `localhost` as the host address.
+
+
+
+```bash
+python -m lerobot.scripts.server.policy_server \
+ --host="localhost" \
+ --port=8080
+```
+
+
+
+
+```python
+from lerobot.scripts.server.configs import PolicyServerConfig
+from lerobot.scripts.server.policy_server import serve
+
+config = PolicyServerConfig(
+ host="localhost",
+ port=8080,
+)
+serve(config)
+```
+
+
+
+
+
+This listens on `localhost:8080` for an incoming connection from the associated `RobotClient`, which will communicate which policy to run during the first client-server handshake.
+
+---
+
+## Launch the Robot Client
+
+`RobotClient` is a wrapper around a `Robot` instance that connects to the (possibly remote) `PolicyServer`.
+The `RobotClient` streams observations to the `PolicyServer` and receives action chunks obtained by running inference on the server (which we assume has more computational resources than the robot controller).
+
+
+
+```bash
+python src/lerobot/scripts/server/robot_client.py \
+ --server_address=127.0.0.1:8080 \ # SERVER: the host address and port of the policy server
+ --robot.type=so100_follower \ # ROBOT: your robot type
+ --robot.port=/dev/tty.usbmodem585A0076841 \ # ROBOT: your robot port
+ --robot.id=follower_so100 \ # ROBOT: your robot id, to load calibration file
+ --robot.cameras="{ laptop: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}, phone: {type: opencv, index_or_path: 1, width: 1920, height: 1080, fps: 30}}" \ # POLICY: the cameras used to acquire frames, with keys matching the keys expected by the policy
+ --task="dummy" \ # POLICY: The task to run the policy on (`Fold my t-shirt`). Not necessarily defined for all policies, such as `act`
+ --policy_type=your_policy_type \ # POLICY: the type of policy to run (smolvla, act, etc)
+ --pretrained_name_or_path=user/model \ # POLICY: the model name/path on server to the checkpoint to run (e.g., lerobot/smolvla_base)
+ --policy_device=mps \ # POLICY: the device to run the policy on, on the server
+ --actions_per_chunk=50 \ # POLICY: the number of actions to output at once
+ --chunk_size_threshold=0.5 \ # CLIENT: the threshold for the chunk size before sending a new observation to the server
+ --aggregate_fn_name=weighted_average \ # CLIENT: the function to aggregate actions on overlapping portions
+ --debug_visualize_queue_size=True # CLIENT: whether to visualize the queue size at runtime
+```
+
+
+
+
+```python
+import threading
+from lerobot.robots.so100_follower import SO100FollowerConfig
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.scripts.server.configs import RobotClientConfig
+from lerobot.scripts.server.robot_client import RobotClient
+from lerobot.scripts.server.helpers import visualize_action_queue_size
+
+# 1. Create the robot and camera configurations
+# Check which cameras are available in your setup with `python -m lerobot.find_cameras`.
+# These cameras must match the ones expected by the policy:
+# check the config.json on the Hub for the policy you are using.
+camera_cfg = {
+ "top": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
+ "side": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30)
+}
+
+robot_cfg = SO100FollowerConfig(
+ port="/dev/tty.usbmodem585A0076841",
+ id="follower_so100",
+ cameras=camera_cfg
+)
+
+# 2. Create the client configuration
+client_cfg = RobotClientConfig(
+ robot=robot_cfg,
+ server_address="localhost:8080",
+ policy_device="mps",
+ policy_type="smolvla",
+ pretrained_name_or_path="fracapuano/smolvla_async",
+ chunk_size_threshold=0.5,
+ actions_per_chunk=50, # make sure this is less than the max actions of the policy
+)
+
+# 3. Create and start the client
+client = RobotClient(client_cfg)
+
+# 4. Specify the task
+task = "Don't do anything, stay still"
+
+if client.start():
+ # Start action receiver thread
+ action_receiver_thread = threading.Thread(target=client.receive_actions, daemon=True)
+ action_receiver_thread.start()
+
+ try:
+ # Run the control loop
+ client.control_loop(task)
+ except KeyboardInterrupt:
+ client.stop()
+ action_receiver_thread.join()
+ # (Optionally) plot the action queue size
+ visualize_action_queue_size(client.action_queue_size)
+```
+
+
+
+
+
+The following two parameters are key in every setup:
+
+| Hyperparameter | Default | What it does |
+| --- | --- | --- |
+| `actions_per_chunk` | 50 | How many actions the policy outputs at once. Typical values: 10-50. |
+| `chunk_size_threshold` | 0.7 | Fraction of the chunk size below which the client sends a fresh observation. Value in [0, 1]. |
+
+Different values of `actions_per_chunk` and `chunk_size_threshold` result in different behaviours.
+
+
+On the one hand, increasing `actions_per_chunk` reduces the likelihood of running out of actions to execute, because more actions are available by the time the new chunk is computed.
+However, larger values of `actions_per_chunk` may also result in less precise actions, due to the compounding errors that come with predicting actions over longer timespans.
+
+On the other hand, increasing `chunk_size_threshold` makes the client send observations to the `PolicyServer` more often, producing more frequent, heavily overlapping action chunks. This yields higher adaptability: in the limit, a new chunk is predicted for every observation, each only marginally consumed before the next one arrives.
+This also puts more pressure on the inference pipeline, as a consequence of the many requests. Conversely, values of `chunk_size_threshold` close to 0.0 collapse to the synchronous edge case, whereby a new observation is only sent out once the current chunk is exhausted.
+
+We found the default values of `actions_per_chunk` and `chunk_size_threshold` to work well in the experiments we developed for the [SmolVLA paper](https://huggingface.co/papers/2506.01844), but recommend experimenting with different values to find the best fit for your setup.
+
+### Tuning async inference for your setup
+
+1. **Choose your computational resources carefully.** [PI0](https://huggingface.co/lerobot/pi0) occupies 14GB of memory at inference time, while [SmolVLA](https://huggingface.co/lerobot/smolvla_base) requires only ~2GB. Identify the best computational resource for your use case, keeping in mind that smaller policies require fewer resources. The combination of policy and device used (CPU-intensive, using MPS, or the number of CUDA cores on a given NVIDIA GPU) directly impacts the average inference latency you should expect.
+2. **Adjust your `fps` based on inference latency.** While the server generates a new action chunk, the client is not idle: it keeps stepping through its current action queue. If the two processes run at fundamentally different speeds, the client might end up with an empty queue. As such, you should reduce your `fps` if you consistently run out of actions in the queue.
+3. **Adjust `chunk_size_threshold`**.
+   - Values closer to `0.0` result in almost sequential behavior; values closer to `1.0` send an observation at nearly every step (more bandwidth, and more reliance on a good world model).
+   - We found values around 0.5-0.6 to work well. If you want to tweak this, spin up a `RobotClient` with `--debug_visualize_queue_size=True`. This plots the action queue size at runtime, which you can use to find the value of `chunk_size_threshold` that works best for your setup (see the sketch below for the underlying queue logic).
+
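+The decision of when to request a new chunk boils down to a simple queue-size check. Below is a minimal sketch of that logic (names are illustrative; the actual implementation lives in `robot_client.py`):
+
+```python
+actions_per_chunk = 50      # length of each chunk returned by the server
+chunk_size_threshold = 0.5  # fraction of the chunk below which a new observation is sent
+fps = 30                    # client control frequency
+
+def should_send_observation(queue_length: int) -> bool:
+    # Send a fresh observation once the queue drops to <= threshold * chunk size.
+    return queue_length / actions_per_chunk <= chunk_size_threshold
+
+# With these numbers, the observation goes out when ~25 actions (~0.8 s at 30 fps)
+# are still queued, which should leave the server enough time to return the next chunk.
+print(should_send_observation(queue_length=24))  # True
+print(should_send_observation(queue_length=40))  # False
+```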
+
+_Figure: The action queue size is plotted at runtime when the `--debug_visualize_queue_size` flag is passed, for various levels of `chunk_size_threshold` (`g` in the SmolVLA paper)._
+
+
+---
+
+## Conclusion
+
+Asynchronous inference represents a significant advancement in real-time robotics control, addressing the fundamental challenge of inference latency that has long plagued robotics applications. Through this tutorial, you've learned how to implement a complete async inference pipeline that eliminates idle frames and enables smoother, more reactive robot behaviors.
+
+**Key Takeaways:**
+
+- **Paradigm Shift**: Async inference decouples action prediction from execution, allowing robots to continue acting while new action chunks are computed in parallel
+- **Performance Benefits**: Eliminates "wait-for-inference" lags that are inherent in synchronous approaches, becoming increasingly important as policy models grow larger
+- **Flexible Architecture**: The server-client design enables distributed computing, where inference can run on powerful remote hardware while maintaining real-time robot control
+- **Tunable Parameters**: Success depends on properly configuring `actions_per_chunk` and `chunk_size_threshold` for your specific hardware, policy, and task requirements
+- **Universal Compatibility**: Works with all LeRobot-supported policies, from lightweight ACT models to vision-language models like SmolVLA
+
+Start experimenting with the default parameters, monitor your action queue sizes, and iteratively refine your setup to achieve optimal performance for your specific use case.
+If you want to discuss this further, hop into our [Discord community](https://discord.gg/s3KuuzsPFb), or open an issue on our [GitHub repository](https://github.com/huggingface/lerobot/issues).
diff --git a/docs/source/backwardcomp.mdx b/docs/source/backwardcomp.mdx
new file mode 100644
index 0000000000..0e1d016365
--- /dev/null
+++ b/docs/source/backwardcomp.mdx
@@ -0,0 +1,95 @@
+# Backward compatibility
+
+## Hardware API redesign
+
+PR [#777](https://github.com/huggingface/lerobot/pull/777) improves the LeRobot calibration but is **not backward-compatible**. Below is an overview of what changed and how you can continue to work with datasets created before this pull request.
+
+### What changed?
+
+| | Before PR #777 | After PR #777 |
+| --------------------------------- | ------------------------------------------------- | ------------------------------------------------------------ |
+| **Joint range** | Degrees `-180...180°` | **Normalised range** Joints: `-100...100`, Gripper: `0...100` |
+| **Zero position (SO100 / SO101)** | Arm fully extended horizontally | **In middle of the range for each joint** |
+| **Boundary handling** | Software safeguards to detect ±180° wrap-arounds | No wrap-around logic needed due to mid-range zero |
+
+---
+
+### Impact on existing datasets
+
+- Recorded trajectories created **before** PR #777 will replay incorrectly if loaded directly:
+ - Joint angles are offset and incorrectly normalized.
+- Any models directly finetuned or trained on the old data will need their inputs and outputs converted.
+
+### Using datasets made with the previous calibration system
+
+We provide a migration example script for replaying an episode recorded with the previous calibration here: `examples/backward_compatibility/replay.py`.
+Below, we walk through the modifications made in the example script so that datasets recorded with the previous calibration work.
+
+```diff
++ key = f"{name.removeprefix('main_')}.pos"
+ action[key] = action_array[i].item()
++ action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
++ action["elbow_flex.pos"] -= 90
+```
+
+Let's break this down.
+The new codebase uses the `.pos` suffix for position observations, and the `main_` prefix has been removed:
+
+
+```python
+key = f"{name.removeprefix('main_')}.pos"
+```
+
+
+For `"shoulder_lift"` (id = 2), the 0 position is changed by -90 degrees and the direction is reversed compared to old calibration/code.
+
+
+```python
+action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
+```
+
+
+For `"elbow_flex"` (id = 3), the 0 position is changed by -90 degrees compared to old calibration/code.
+
+
+```python
+action["elbow_flex.pos"] -= 90
+```
+
+
+To use degrees for normalization, we then set the `--robot.use_degrees` option to `true`.
+
+```diff
+python examples/backward_compatibility/replay.py \
+ --robot.type=so101_follower \
+ --robot.port=/dev/tty.usbmodem5A460814411 \
+ --robot.id=blue \
++ --robot.use_degrees=true \
+ --dataset.repo_id=my_dataset_id \
+ --dataset.episode=0
+```
+
+### Using policies trained with the previous calibration system
+
+Policies output actions in the same format as the datasets (`torch.Tensors`). Therefore, the same transformations should be applied.
+
+To find these transformations, we recommend first replaying an episode of the dataset your policy was trained on, following the section above.
+Then, add the same transformations to your inference script (shown here in the `record.py` script):
+
+```diff
+action_values = predict_action(
+ observation_frame,
+ policy,
+ get_safe_torch_device(policy.config.device),
+ policy.config.use_amp,
+ task=single_task,
+ robot_type=robot.robot_type,
+ )
+ action = {key: action_values[i].item() for i, key in enumerate(robot.action_features)}
+
++ action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
++ action["elbow_flex.pos"] -= 90
+ robot.send_action(action)
+```
+
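+If you prefer, the same corrections can be wrapped in a small helper applied right before `robot.send_action`. A minimal sketch, assuming the SO100/SO101 joint names used above:
+
+```python
+def convert_old_calibration_action(action: dict[str, float]) -> dict[str, float]:
+    """Map an action predicted with the pre-#777 calibration to the new convention."""
+    action = dict(action)  # avoid mutating the caller's dict
+    action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
+    action["elbow_flex.pos"] -= 90
+    return action
+```
+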
+If you have questions or run into migration issues, feel free to ask them on [Discord](https://discord.gg/s3KuuzsPFb).
diff --git a/docs/source/cameras.mdx b/docs/source/cameras.mdx
new file mode 100644
index 0000000000..604863d74a
--- /dev/null
+++ b/docs/source/cameras.mdx
@@ -0,0 +1,206 @@
+# Cameras
+
+LeRobot offers multiple options for video capture, including phone cameras, built-in laptop cameras, external webcams, and Intel RealSense cameras. To efficiently record frames from most cameras, you can use either the `OpenCVCamera` or `RealSenseCamera` class. For additional compatibility details on the `OpenCVCamera` class, refer to the [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
+
+### Finding your camera
+
+To instantiate a camera, you need a camera identifier. This identifier might change if you reboot your computer or re-plug your camera, a behavior mostly dependent on your operating system.
+
+To find the camera indices of the cameras plugged into your system, run the following script:
+
+```bash
+python -m lerobot.find_cameras opencv # or realsense for Intel Realsense cameras
+```
+
+The output will look something like this if you have two cameras connected:
+
+```
+--- Detected Cameras ---
+Camera #0:
+ Name: OpenCV Camera @ 0
+ Type: OpenCV
+ Id: 0
+ Backend api: AVFOUNDATION
+ Default stream profile:
+ Format: 16.0
+ Width: 1920
+ Height: 1080
+ Fps: 15.0
+--------------------
+(more cameras ...)
+```
+
+> [!WARNING]
+> When using Intel RealSense cameras on `macOS`, you could get this [error](https://github.com/IntelRealSense/librealsense/issues/12307): `Error finding RealSense cameras: failed to set power state`. This can be solved by running the same command with `sudo` permissions. Note that using RealSense cameras on `macOS` is unstable.
+
+## Use Cameras
+
+Below are two examples demonstrating how to work with the API:
+
+- **Asynchronous frame capture** using an OpenCV-based camera
+- **Color and depth capture** using an Intel RealSense camera
+
+
+
+
+
+```python
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.cameras.opencv.camera_opencv import OpenCVCamera
+from lerobot.cameras.configs import ColorMode, Cv2Rotation
+
+# Construct an `OpenCVCameraConfig` with your desired FPS, resolution, color mode, and rotation.
+config = OpenCVCameraConfig(
+ index_or_path=0,
+ fps=15,
+ width=1920,
+ height=1080,
+ color_mode=ColorMode.RGB,
+ rotation=Cv2Rotation.NO_ROTATION
+)
+
+# Instantiate and connect an `OpenCVCamera`, performing a warm-up read (default).
+camera = OpenCVCamera(config)
+camera.connect()
+
+# Read frames asynchronously in a loop via `async_read(timeout_ms)`
+try:
+ for i in range(10):
+ frame = camera.async_read(timeout_ms=200)
+ print(f"Async frame {i} shape:", frame.shape)
+finally:
+ camera.disconnect()
+```
+
+
+
+
+
+
+```python
+from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig
+from lerobot.cameras.realsense.camera_realsense import RealSenseCamera
+from lerobot.cameras.configs import ColorMode, Cv2Rotation
+
+# Create a `RealSenseCameraConfig` specifying your camera’s serial number and enabling depth.
+config = RealSenseCameraConfig(
+ serial_number_or_name="233522074606",
+ fps=15,
+ width=640,
+ height=480,
+ color_mode=ColorMode.RGB,
+ use_depth=True,
+ rotation=Cv2Rotation.NO_ROTATION
+)
+
+# Instantiate and connect a `RealSenseCamera` with warm-up read (default).
+camera = RealSenseCamera(config)
+camera.connect()
+
+# Capture a color frame via `read()` and a depth map via `read_depth()`.
+try:
+ color_frame = camera.read()
+ depth_map = camera.read_depth()
+ print("Color frame shape:", color_frame.shape)
+ print("Depth map shape:", depth_map.shape)
+finally:
+ camera.disconnect()
+```
+
+
+
+
+
+## Use your phone
+
+
+
+
+To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
+
+- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
+- Sign in both devices with the same Apple ID.
+- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.
+
+For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).
+
+Your iPhone should be detected automatically when running the camera setup script in the next section.
+
+
+
+
+If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera:
+
+1. _Install `v4l2loopback-dkms` and `v4l-utils`_. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
+
+
+```bash
+sudo apt install v4l2loopback-dkms v4l-utils
+```
+
+
+2. _Install [DroidCam](https://droidcam.app) on your phone_. This app is available for both iOS and Android.
+3. _Install [OBS Studio](https://obsproject.com)_. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
+
+
+```bash
+flatpak install flathub com.obsproject.Studio
+```
+
+
+4. _Install the DroidCam OBS plugin_. This plugin integrates DroidCam with OBS Studio. Install it with:
+
+
+```bash
+flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
+```
+
+
+5. _Start OBS Studio_. Launch with:
+
+
+```bash
+flatpak run com.obsproject.Studio
+```
+
+
+6. _Add your phone as a source_. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
+7. _Adjust resolution settings_. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
+8. _Start virtual camera_. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
+9. _Verify the virtual camera setup_. Use `v4l2-ctl` to list the devices:
+
+
+```bash
+v4l2-ctl --list-devices
+```
+
+
+You should see an entry like:
+
+```
+VirtualCam (platform:v4l2loopback-000):
+/dev/video1
+```
+
+10. _Check the camera resolution_. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Replace `/dev/video1` with your virtual camera device from the output of `v4l2-ctl --list-devices`.
+
+
+```bash
+v4l2-ctl -d /dev/video1 --get-fmt-video
+```
+
+
+You should see an entry like:
+
+```
+>>> Format Video Capture:
+>>> Width/Height : 640/480
+>>> Pixel Format : 'YUYV' (YUYV 4:2:2)
+```
+
+Troubleshooting: If the resolution is not correct, you will have to delete the virtual camera device and try again, as it cannot be changed.
+
+If everything is set up correctly, you can proceed with the rest of the tutorial.
+
+
+
diff --git a/docs/source/contributing.md b/docs/source/contributing.md
new file mode 120000
index 0000000000..f939e75f21
--- /dev/null
+++ b/docs/source/contributing.md
@@ -0,0 +1 @@
+../../CONTRIBUTING.md
\ No newline at end of file
diff --git a/docs/source/hilserl.mdx b/docs/source/hilserl.mdx
new file mode 100644
index 0000000000..c647a58d5e
--- /dev/null
+++ b/docs/source/hilserl.mdx
@@ -0,0 +1,601 @@
+# HIL-SERL Real Robot Training Workflow Guide
+
+In this tutorial you will go through the full Human-in-the-Loop Sample-Efficient Reinforcement Learning (HIL-SERL) workflow using LeRobot. You will master training a policy with RL on a real robot in just a few hours.
+
+HIL-SERL is a sample-efficient reinforcement learning algorithm that combines human demonstrations with online learning and human interventions. The approach starts from a small set of human demonstrations, uses them to train a reward classifier, and then employs an actor-learner architecture where humans can intervene during policy execution to guide exploration and correct unsafe behaviors. In this tutorial, you'll use a gamepad to provide interventions and control the robot during the learning process.
+
+It combines three key ingredients:
+
+1. **Offline demonstrations & reward classifier:** a handful of human-teleop episodes plus a vision-based success detector give the policy a shaped starting point.
+2. **On-robot actor / learner loop with human interventions:** a distributed Soft Actor-Critic (SAC) learner updates the policy while an actor explores on the physical robot; the human can jump in at any time to correct dangerous or unproductive behaviour.
+3. **Safety & efficiency tools:** joint/end-effector (EE) bounds, crop region-of-interest (ROI) preprocessing, and WandB monitoring keep the data useful and the hardware safe.
+
+Together these elements let HIL-SERL reach near-perfect task success and faster cycle times than imitation-only baselines.
+
+
+
+_Figure: HIL-SERL workflow (Luo et al., 2024)._
+
+This guide provides step-by-step instructions for training a policy on a real robot using LeRobot's HIL-SERL implementation.
+
+## What do I need?
+
+- A gamepad (recommended) or keyboard to control the robot
+- A Nvidia GPU
+- A real robot with a follower arm and a leader arm (the leader arm is optional if you use the keyboard or the gamepad)
+- A URDF file for the robot for the kinematics package (check `lerobot/common/model/kinematics.py`)
+
+## What kind of tasks can I train?
+
+You can use HIL-SERL to train on a variety of manipulation tasks. Some recommendations:
+
+- Start with a simple task to understand how the system works.
+ - Push cube to a goal region
+ - Pick and lift cube with the gripper
+- Avoid extremely long horizon tasks. Focus on tasks that can be completed in 5-10 seconds.
+- Once you have a good idea of how the system works, you can try more complex tasks and longer horizons.
+ - Pick and place cube
+ - Bimanual tasks to pick objects with two arms
+ - Hand-over tasks to transfer objects from one arm to another
+ - Go crazy!
+
+## Install LeRobot with HIL-SERL
+
+To install LeRobot with HIL-SERL, you need to install the `hilserl` extra.
+
+```bash
+pip install -e ".[hilserl]"
+```
+
+## Real Robot Training Workflow
+
+### Understanding Configuration
+
+The training process begins with proper configuration for the HILSerl environment. The configuration class of interest is `HILSerlRobotEnvConfig` in `lerobot/envs/configs.py`, which is defined as:
+
+
+```python
+class HILSerlRobotEnvConfig(EnvConfig):
+ robot: RobotConfig | None = None # Main robot agent (defined in `lerobot/robots`)
+ teleop: TeleoperatorConfig | None = None # Teleoperator agent, e.g., gamepad or leader arm, (defined in `lerobot/teleoperators`)
+ wrapper: EnvTransformConfig | None = None # Environment wrapper settings; check `lerobot/scripts/server/gym_manipulator.py`
+ fps: int = 10 # Control frequency
+ name: str = "real_robot" # Environment name
+ mode: str = None # "record", "replay", or None (for training)
+ repo_id: str | None = None # LeRobot dataset repository ID
+ dataset_root: str | None = None # Local dataset root (optional)
+ task: str = "" # Task identifier
+ num_episodes: int = 10 # Number of episodes for recording
+ episode: int = 0 # episode index for replay
+ device: str = "cuda" # Compute device
+ push_to_hub: bool = True # Whether to push the recorded datasets to Hub
+ pretrained_policy_name_or_path: str | None = None # For policy loading
+ reward_classifier_pretrained_path: str | None = None # For reward model
+ number_of_steps_after_success: int = 0 # For reward classifier, collect more positive examples after a success to train a classifier
+```
+
+
+### Finding Robot Workspace Bounds
+
+Before collecting demonstrations, you need to determine the appropriate operational bounds for your robot.
+
+This helps simplify the problem of learning on the real robot in two ways: 1) by limiting the robot's operational space to a specific region that solves the task and avoids unnecessary or unsafe exploration, and 2) by allowing training in end-effector space rather than joint space. Empirically, learning in joint space for reinforcement learning in manipulation is often a harder problem - some tasks are nearly impossible to learn in joint space but become learnable when the action space is transformed to end-effector coordinates.
+
+**Using find_joint_limits.py**
+
+This script helps you find the safe operational bounds for your robot's end-effector. Given that you have a follower and leader arm, you can use the script to find the bounds for the follower arm that will be applied during training.
+Bounding the action space reduces the agent's redundant exploration and guarantees safety.
+
+```bash
+python -m lerobot.scripts.find_joint_limits \
+ --robot.type=so100_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.id=black \
+ --teleop.type=so100_leader \
+ --teleop.port=/dev/tty.usbmodem58760431551 \
+ --teleop.id=blue
+```
+
+**Workflow**
+
+1. Run the script and move the robot through the space that solves the task
+2. The script will record the minimum and maximum end-effector positions and joint angles and print them to the console, for example:
+ ```
+ Max ee position [0.2417 0.2012 0.1027]
+ Min ee position [0.1663 -0.0823 0.0336]
+ Max joint positions [-20.0, -20.0, -20.0, -20.0, -20.0, -20.0]
+ Min joint positions [50.0, 50.0, 50.0, 50.0, 50.0, 50.0]
+ ```
+3. Use these values in the configuration of your teleoperation device (TeleoperatorConfig) under the `end_effector_bounds` field
+
+**Example Configuration**
+
+```json
+"end_effector_bounds": {
+ "max": [0.24, 0.20, 0.10],
+ "min": [0.16, -0.08, 0.03]
+}
+```
+
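+Conceptually, such bounds can be used to clamp any commanded end-effector target to the safe region. The snippet below is only an illustration of that idea, not the exact `gym_manipulator` implementation:
+
+```python
+import numpy as np
+
+end_effector_bounds = {
+    "max": np.array([0.24, 0.20, 0.10]),
+    "min": np.array([0.16, -0.08, 0.03]),
+}
+
+target_xyz = np.array([0.30, 0.00, 0.02])  # requested position, partly out of range
+safe_xyz = np.clip(target_xyz, end_effector_bounds["min"], end_effector_bounds["max"])
+print(safe_xyz)  # -> [0.24 0.   0.03]
+```
+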
+### Collecting Demonstrations
+
+With the bounds defined, you can safely collect demonstrations for training. Training with an off-policy RL algorithm allows us to use previously collected offline datasets to improve the efficiency of the learning process.
+
+**Setting Up Record Mode**
+
+Create a configuration file for recording demonstrations (or edit an existing one like [env_config_so100.json](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/env_config_so100.json)):
+
+1. Set `mode` to `"record"`
+2. Specify a unique `repo_id` for your dataset (e.g., "username/task_name")
+3. Set `num_episodes` to the number of demonstrations you want to collect
+4. Set `crop_params_dict` to `null` initially (we'll determine crops later)
+5. Configure `robot`, `cameras`, and other hardware settings
+
+Example configuration section:
+
+```json
+"mode": "record",
+"repo_id": "username/pick_lift_cube",
+"dataset_root": null,
+"task": "pick_and_lift",
+"num_episodes": 15,
+"episode": 0,
+"push_to_hub": true
+```
+
+### Using a Teleoperation Device
+
+Along with your robot, you will need a teleoperation device to control it in order to collect datasets of your task and perform interventions during the online training.
+We support using a gamepad, a keyboard, or the leader arm of the robot.
+
+HIL-SERL learns actions in the end-effector space of the robot. Therefore, the teleoperation device controls the end-effector's x, y, z displacements.
+
+For that we need to define a version of the robot that takes actions in the end-effector space. Check the robot class `SO100FollowerEndEffector` and its configuration `SO100FollowerEndEffectorConfig` for the default parameters related to the end-effector space.
+
+
+```python
+class SO100FollowerEndEffectorConfig(SO100FollowerConfig):
+ """Configuration for the SO100FollowerEndEffector robot."""
+
+ # Default bounds for the end-effector position (in meters)
+ end_effector_bounds: dict[str, list[float]] = field( # bounds for the end-effector in x,y,z direction
+ default_factory=lambda: {
+ "min": [-1.0, -1.0, -1.0], # min x, y, z
+ "max": [1.0, 1.0, 1.0], # max x, y, z
+ }
+ )
+
+ max_gripper_pos: float = 50 # maximum gripper position that the gripper will be open at
+
+ end_effector_step_sizes: dict[str, float] = field( # maximum step size for the end-effector in x,y,z direction
+ default_factory=lambda: {
+ "x": 0.02,
+ "y": 0.02,
+ "z": 0.02,
+ }
+ )
+```
+
+
+The `Teleoperator` defines the teleoperation device. You can check the list of available teleoperators in `lerobot/teleoperators`.
+
+**Setting up the Gamepad**
+
+The gamepad provides a very convenient way to control the robot and the episode state.
+
+To set up the gamepad, you need to set the `control_mode` to `"gamepad"` and define the `teleop` section in the configuration file.
+
+```json
+ "teleop": {
+ "type": "gamepad",
+ "use_gripper": true
+ },
+```
+
+
+
+_Figure: Gamepad button mapping for robot control and episode management._
+
+**Setting up the SO101 leader**
+
+The SO101 leader arm has reduced gearing that allows it to move and track the follower arm during exploration, so taking over is much smoother than with the gearless SO100.
+
+To set up the SO101 leader, you need to set the `control_mode` to `"leader"` and define the `teleop` section in the configuration file (replace the port with the one for your leader arm).
+
+```json
+ "teleop": {
+ "type": "so101_leader",
+ "port": "/dev/tty.usbmodem585A0077921", # check your port number
+ "use_degrees": true
+ },
+```
+
+In order to annotate the success/failure of the episode, **you will need** to use a keyboard: press `s` for success, `esc` for failure.
+During the online training, press `space` to take over from the policy and `space` again to give control back to the policy.
+
+
+Video: SO101 leader teleoperation
+
+_SO101 leader teleoperation example: the leader tracks the follower; press `space` to intervene._
+
+**Recording Demonstrations**
+
+Start the recording process, an example of the config file can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/env_config_so100.json):
+
+```bash
+python -m lerobot.scripts.rl.gym_manipulator --config_path src/lerobot/configs/env_config_so100.json
+```
+
+During recording:
+
+1. The robot will reset to the initial position defined by `fixed_reset_joint_positions` in the configuration file
+2. Complete the task successfully
+3. The episode ends with a reward of 1 when you press the "success" button
+4. If the time limit is reached, or the fail button is pressed, the episode ends with a reward of 0
+5. You can rerecord an episode by pressing the "rerecord" button
+6. The process automatically continues to the next episode
+7. After recording all episodes, the dataset is pushed to the Hugging Face Hub (optional) and saved locally
+
+### Processing the Dataset
+
+After collecting demonstrations, process them to determine optimal camera crops.
+Reinforcement learning is sensitive to background distractions, so it is important to crop the images to the relevant workspace area.
+
+Visual RL algorithms learn directly from pixel inputs, making them vulnerable to irrelevant visual information. Background elements like changing lighting, shadows, people moving, or objects outside the workspace can confuse the learning process. Good ROI selection should:
+
+- Include only the essential workspace where the task happens
+- Capture the robot's end-effector and all objects involved in the task
+- Exclude unnecessary background elements and distractions
+
+Note: If you already know the crop parameters, you can skip this step and just set the `crop_params_dict` in the configuration file during recording.
+
+**Determining Crop Parameters**
+
+Use the `crop_dataset_roi.py` script to interactively select regions of interest in your camera images:
+
+```bash
+python -m lerobot.scripts.rl.crop_dataset_roi --repo-id username/pick_lift_cube
+```
+
+1. For each camera view, the script will display the first frame
+2. Draw a rectangle around the relevant workspace area
+3. Press 'c' to confirm the selection
+4. Repeat for all camera views
+5. The script outputs cropping parameters and creates a new cropped dataset
+
+Example output:
+
+```
+Selected Rectangular Regions of Interest (top, left, height, width):
+observation.images.side: [180, 207, 180, 200]
+observation.images.front: [180, 250, 120, 150]
+```
+
+
+
+_Figure: Interactive cropping tool for selecting regions of interest._
+
+**Updating Configuration**
+
+Add these crop parameters to your training configuration:
+
+```json
+"crop_params_dict": {
+ "observation.images.side": [180, 207, 180, 200],
+ "observation.images.front": [180, 250, 120, 150]
+},
+"resize_size": [128, 128]
+```
+
+**Recommended image resolution**
+
+Most vision-based policies have been validated on square inputs of either **128×128** (default) or **64×64** pixels. We therefore advise setting the `resize_size` parameter to `[128, 128]`, or `[64, 64]` if you need to save GPU memory and bandwidth. Other resolutions are possible but have not been extensively tested.
+
+### Training a Reward Classifier
+
+The reward classifier plays an important role in the HIL-SERL workflow by automating reward assignment and automatically detecting episode success. Instead of manually defining reward functions or relying on human feedback for every timestep, the reward classifier learns to predict success/failure from visual observations. This enables the RL algorithm to learn efficiently by providing consistent and automated reward signals based on the robot's camera inputs.
+
+This guide explains how to train a reward classifier for the human-in-the-loop reinforcement learning implementation of LeRobot. Reward classifiers learn to predict the reward value given a state, which can then be used in an RL setup to train a policy.
+
+**Note**: Training a reward classifier is optional. You can start the first round of RL experiments by annotating the success manually with your gamepad or keyboard device.
+
+The reward classifier implementation in `modeling_classifier.py` uses a pretrained vision model to process the images. It can output either a single value for binary rewards to predict success/fail cases or multiple values for multi-class settings.
+
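+Conceptually, the classifier maps one or more camera frames to a success logit. The sketch below is purely illustrative (a generic torchvision backbone and a hypothetical `TinyRewardClassifier` class); the actual implementation lives in `modeling_classifier.py` and typically uses a pretrained backbone such as `helper2424/resnet10`:
+
+```python
+import torch
+import torch.nn as nn
+import torchvision
+
+class TinyRewardClassifier(nn.Module):
+    def __init__(self, num_cameras: int = 2, hidden_dim: int = 256):
+        super().__init__()
+        backbone = torchvision.models.resnet18(weights=None)
+        backbone.fc = nn.Identity()  # keep the 512-d pooled features
+        self.backbone = backbone
+        self.head = nn.Sequential(
+            nn.Linear(512 * num_cameras, hidden_dim),
+            nn.ReLU(),
+            nn.Linear(hidden_dim, 1),  # single logit for binary success/failure
+        )
+
+    def forward(self, images: list[torch.Tensor]) -> torch.Tensor:
+        # images: one (batch, 3, H, W) tensor per camera
+        feats = torch.cat([self.backbone(img) for img in images], dim=-1)
+        return self.head(feats)
+
+# Example: two 128x128 camera crops -> binary reward
+clf = TinyRewardClassifier(num_cameras=2)
+logit = clf([torch.rand(1, 3, 128, 128), torch.rand(1, 3, 128, 128)])
+reward = (torch.sigmoid(logit) > 0.5).float()  # 1.0 if predicted successful
+```
+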
+**Collecting a Dataset for the reward classifier**
+
+Before training, you need to collect a dataset with labeled examples. The `record_dataset` function in `gym_manipulator.py` enables the process of collecting a dataset of observations, actions, and rewards.
+
+To collect a dataset, you need to modify some parameters in the environment configuration based on HILSerlRobotEnvConfig.
+
+```bash
+python -m lerobot.scripts.rl.gym_manipulator --config_path src/lerobot/configs/reward_classifier_train_config.json
+```
+
+**Key Parameters for Data Collection**
+
+- **mode**: set it to `"record"` to collect a dataset
+- **repo_id**: `"hf_username/dataset_name"`, name of the dataset and repo on the hub
+- **num_episodes**: Number of episodes to record
+- **number_of_steps_after_success**: Number of additional frames to record after a success (reward=1) is detected
+- **fps**: Number of frames per second to record
+- **push_to_hub**: Whether to push the dataset to the hub
+
+The `number_of_steps_after_success` parameter is crucial as it allows you to collect more positive examples. When a success is detected, the system will continue recording for the specified number of steps while maintaining the reward=1 label. Otherwise, there won't be enough states labeled with reward=1 in the dataset to train a good classifier.
+
+Example configuration section for data collection:
+
+```json
+{
+ "mode": "record",
+ "repo_id": "hf_username/dataset_name",
+ "dataset_root": "data/your_dataset",
+ "num_episodes": 20,
+ "push_to_hub": true,
+ "fps": 10,
+ "number_of_steps_after_success": 15
+}
+```
+
+**Reward Classifier Configuration**
+
+The reward classifier is configured using `configuration_classifier.py`. Here are the key parameters:
+
+- **model_name**: Base model architecture (e.g., we mainly use `"helper2424/resnet10"`)
+- **model_type**: `"cnn"` or `"transformer"`
+- **num_cameras**: Number of camera inputs
+- **num_classes**: Number of output classes (typically 2 for binary success/failure)
+- **hidden_dim**: Size of hidden representation
+- **dropout_rate**: Regularization parameter
+- **learning_rate**: Learning rate for optimizer
+
+Example configuration for training the [reward classifier](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/reward_classifier_train_config.json):
+
+```json
+{
+ "policy": {
+ "type": "reward_classifier",
+ "model_name": "helper2424/resnet10",
+ "model_type": "cnn",
+ "num_cameras": 2,
+ "num_classes": 2,
+ "hidden_dim": 256,
+ "dropout_rate": 0.1,
+ "learning_rate": 1e-4,
+ "device": "cuda",
+ "use_amp": true,
+ "input_features": {
+ "observation.images.front": {
+ "type": "VISUAL",
+ "shape": [3, 128, 128]
+ },
+ "observation.images.side": {
+ "type": "VISUAL",
+ "shape": [3, 128, 128]
+ }
+ }
+ }
+}
+```
+
+**Training the Classifier**
+
+To train the classifier, use the `train.py` script with your configuration:
+
+```bash
+python -m lerobot.scripts.train --config_path path/to/reward_classifier_train_config.json
+```
+
+**Deploying and Testing the Model**
+
+To use your trained reward classifier, configure the `HILSerlRobotEnvConfig` to use your model:
+
+
+```python
+env_config = HILSerlRobotEnvConfig(
+ reward_classifier_pretrained_path="path_to_your_pretrained_model",
+ # Other environment parameters
+)
+```
+
+
+or set the argument in the json config file.
+
+```json
+{
+ "reward_classifier_pretrained_path": "path_to_your_pretrained_model"
+}
+```
+
+Run `gym_manipulator.py` to test the model.
+
+```bash
+python -m lerobot.scripts.rl.gym_manipulator --config_path path/to/env_config.json
+```
+
+The reward classifier will automatically provide rewards based on the visual input from the robot's cameras.
+
+**Example Workflow for training the reward classifier**
+
+1. **Create the configuration files**:
+ Create the necessary json configuration files for the reward classifier and the environment. Check the examples [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/tree/main).
+
+2. **Collect a dataset**:
+
+ ```bash
+ python -m lerobot.scripts.rl.gym_manipulator --config_path src/lerobot/configs/env_config.json
+ ```
+
+3. **Train the classifier**:
+
+ ```bash
+ python -m lerobot.scripts.train --config_path src/lerobot/configs/reward_classifier_train_config.json
+ ```
+
+4. **Test the classifier**:
+ ```bash
+ python -m lerobot.scripts.rl.gym_manipulator --config_path src/lerobot/configs/env_config.json
+ ```
+
+### Training with Actor-Learner
+
+The LeRobot system uses a distributed actor-learner architecture for training. This architecture decouples robot interactions from the learning process, allowing them to run concurrently without blocking each other. The actor server handles robot observations and actions, sending interaction data to the learner server. The learner server performs gradient descent and periodically updates the actor's policy weights. You will need to start two processes: a learner and an actor.
+
+**Configuration Setup**
+
+Create a training configuration file (example available [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/train_config_hilserl_so100.json)). The training config is based on the main `TrainRLServerPipelineConfig` class in `lerobot/configs/train.py`.
+
+1. Configure the policy settings (`type="sac"`, `device`, etc.)
+2. Set `dataset` to your cropped dataset
+3. Configure environment settings with crop parameters
+4. Check the other parameters related to SAC in [configuration_sac.py](https://github.com/huggingface/lerobot/blob/19bb621a7d0a31c20cd3cc08b1dbab68d3031454/lerobot/policies/sac/configuration_sac.py#L79).
+5. Verify that the `policy` config is correct with the right `input_features` and `output_features` for your task.
+
+**Starting the Learner**
+
+First, start the learner server process:
+
+```bash
+python -m lerobot.scripts.rl.learner --config_path src/lerobot/configs/train_config_hilserl_so100.json
+```
+
+The learner:
+
+- Initializes the policy network
+- Prepares replay buffers
+- Opens a `gRPC` server to communicate with actors
+- Processes transitions and updates the policy
+
+**Starting the Actor**
+
+In a separate terminal, start the actor process with the same configuration:
+
+```bash
+python -m lerobot.scripts.rl.actor --config_path src/lerobot/configs/train_config_hilserl_so100.json
+```
+
+The actor:
+
+- Connects to the learner via `gRPC`
+- Initializes the environment
+- Executes rollouts of the policy to collect experience
+- Sends transitions to the learner
+- Receives updated policy parameters
+
+**Training Flow**
+
+The training proceeds automatically:
+
+1. The actor executes the policy in the environment
+2. Transitions are collected and sent to the learner
+3. The learner updates the policy based on these transitions
+4. Updated policy parameters are sent back to the actor
+5. The process continues until the specified step limit is reached
+
+**Human in the Loop**
+
+- The key to learning efficiently is human intervention: providing corrective feedback and completing the task help guide the policy's learning and exploration.
+- To perform human interventions, you can press the upper right trigger button on the gamepad (or the `space` key on the keyboard). This will pause the policy actions and allow you to take over.
+- A successful experiment is one where the human has to intervene at the start but then reduces the amount of interventions as the policy improves. You can monitor the intervention rate in the `wandb` dashboard.
+
+
+
+_Figure: Example showing how human interventions help guide policy learning over time._
+
+- The figure plots the episodic reward over interaction steps and shows the effect of human interventions on policy learning.
+- The orange curve is an experiment without any human interventions, while the pink and blue curves are experiments with human interventions.
+- We can observe that the number of steps needed for the policy to start achieving the maximum reward is cut by a quarter when human interventions are present.
+
+**Monitoring and Debugging**
+
+If you have `wandb.enable` set to `true` in your configuration, you can monitor training progress in real-time through the [Weights & Biases](https://wandb.ai/site/) dashboard.
+
+### Guide to Human Interventions
+
+The learning process is very sensitive to the intervention strategy. It will take a few runs to understand how to intervene effectively. Some tips and hints:
+
+- Allow the policy to explore for a few episodes at the start of training.
+- Avoid intervening for long periods of time; try to intervene with short corrections when the robot's behaviour goes off track.
+- Once the policy starts achieving the task, even if it's not perfect, you can limit your interventions to quick actions such as a simple grasping command.
+
+The ideal behaviour is that your intervention rate should drop gradually during training as shown in the figure below.
+
+
+
+_Figure: Plot of the intervention rate during a training run on a pick-and-lift cube task._
+
+
+### Key hyperparameters to tune
+
+Some configuration values have a disproportionate impact on training stability and speed:
+
+- **`temperature_init`** (`policy.temperature_init`) – initial entropy temperature in SAC. Higher values encourage more exploration; lower values make the policy more deterministic early on. A good starting point is `1e-2`. We observed that setting it too high can make human interventions ineffective and slow down learning.
+- **`policy_parameters_push_frequency`** (`policy.actor_learner_config.policy_parameters_push_frequency`) – interval in _seconds_ between two weight pushes from the learner to the actor. The default is `4 s`. Decrease to **1-2 s** to provide fresher weights (at the cost of more network traffic); increase only if your connection is slow, as this will reduce sample efficiency.
+- **`storage_device`** (`policy.storage_device`) – device on which the learner keeps the policy parameters. If you have spare GPU memory, set this to `"cuda"` (instead of the default `"cpu"`). Keeping the weights on-GPU removes CPU→GPU transfer overhead and can significantly increase the number of learner updates per second.
+
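+The dotted paths above map onto the JSON training config. As a hedged sketch (assuming the nesting follows those dotted paths; adjust to your actual config layout), you could tweak them programmatically before launching the learner and actor:
+
+```python
+import json
+
+# Hypothetical config filename; use the path of your own training config.
+with open("train_config_hilserl_so100.json") as f:
+    cfg = json.load(f)
+
+cfg["policy"]["temperature_init"] = 1e-2
+cfg["policy"]["actor_learner_config"]["policy_parameters_push_frequency"] = 2  # seconds
+cfg["policy"]["storage_device"] = "cuda"
+
+with open("train_config_hilserl_so100.json", "w") as f:
+    json.dump(cfg, f, indent=2)
+```
+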
+Congrats 🎉, you have finished this tutorial!
+
+> [!TIP]
+> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
+
+Paper citation:
+
+```
+@article{luo2024precise,
+ title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning},
+ author={Luo, Jianlan and Xu, Charles and Wu, Jeffrey and Levine, Sergey},
+ journal={arXiv preprint arXiv:2410.21845},
+ year={2024}
+}
+```
diff --git a/docs/source/hilserl_sim.mdx b/docs/source/hilserl_sim.mdx
new file mode 100644
index 0000000000..c739be835c
--- /dev/null
+++ b/docs/source/hilserl_sim.mdx
@@ -0,0 +1,128 @@
+# Train RL in Simulation
+
+This guide explains how to use the `gym_hil` simulation environments as an alternative to real robots when working with the LeRobot framework for Human-In-the-Loop (HIL) reinforcement learning.
+
+`gym_hil` is a package that provides Gymnasium-compatible simulation environments specifically designed for Human-In-the-Loop reinforcement learning. These environments allow you to:
+
+- Train policies in simulation to test the RL stack before training on real robots
+- Collect demonstrations in sim using external devices like gamepads or keyboards
+- Perform human interventions during policy learning
+
+Currently, the main environment is a Franka Panda robot simulation based on MuJoCo, with tasks like picking up a cube.
+
+## Installation
+
+First, install the `gym_hil` package within the LeRobot environment:
+
+```bash
+pip install -e ".[hilserl]"
+```
+
+## What do I need?
+
+- A gamepad or keyboard to control the robot
+- A Nvidia GPU
+
+## Configuration
+
+To use `gym_hil` with LeRobot, you need to create a configuration file. An example is provided [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/gym_hil_env.json). Key configuration sections include:
+
+### Environment Type and Task
+
+```json
+{
+ "type": "hil",
+ "name": "franka_sim",
+ "task": "PandaPickCubeGamepad-v0",
+ "device": "cuda"
+}
+```
+
+Available tasks:
+
+- `PandaPickCubeBase-v0`: Basic environment
+- `PandaPickCubeGamepad-v0`: With gamepad control
+- `PandaPickCubeKeyboard-v0`: With keyboard control
+
+### Gym Wrappers Configuration
+
+```json
+"wrapper": {
+ "gripper_penalty": -0.02,
+ "control_time_s": 15.0,
+ "use_gripper": true,
+ "fixed_reset_joint_positions": [0.0, 0.195, 0.0, -2.43, 0.0, 2.62, 0.785],
+ "end_effector_step_sizes": {
+ "x": 0.025,
+ "y": 0.025,
+ "z": 0.025
+ },
+ "control_mode": "gamepad"
+ }
+```
+
+Important parameters:
+
+- `gripper_penalty`: Penalty for excessive gripper movement
+- `use_gripper`: Whether to enable gripper control
+- `end_effector_step_sizes`: Size of the steps in the x,y,z axes of the end-effector
+- `control_mode`: Set to `"gamepad"` to use a gamepad controller
+
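+Once the configuration is in place, the environments behave like standard Gymnasium environments. A minimal sketch, assuming `gym_hil` registers the task IDs listed above when imported (as is typical for Gymnasium plugins):
+
+```python
+import gymnasium as gym
+import gym_hil  # noqa: F401  # importing registers the Panda environments
+
+env = gym.make("PandaPickCubeBase-v0")
+obs, info = env.reset(seed=0)
+for _ in range(100):
+    action = env.action_space.sample()  # replace with policy or teleoperator actions
+    obs, reward, terminated, truncated, info = env.step(action)
+    if terminated or truncated:
+        obs, info = env.reset()
+env.close()
+```
+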
+## Running with HIL RL of LeRobot
+
+### Basic Usage
+
+To run the environment, set `mode` to `null` in the config:
+
+
+```bash
+python -m lerobot.scripts.rl.gym_manipulator --config_path path/to/gym_hil_env.json
+```
+
+
+### Recording a Dataset
+
+To collect a dataset, set the mode to `record`, and define the `repo_id` and the number of episodes to record:
+
+
+```bash
+python -m lerobot.scripts.rl.gym_manipulator --config_path path/to/gym_hil_env.json
+```
+
+
+### Training a Policy
+
+To train a policy, check out the configuration example available [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/train_gym_hil_env.json) and run the actor and learner servers. Start the actor server with:
+
+
+```bash
+python -m lerobot.scripts.rl.actor --config_path path/to/train_gym_hil_env.json
+```
+
+
+In a different terminal, run the learner server:
+
+
+```bash
+python -m lerobot.scripts.rl.learner --config_path path/to/train_gym_hil_env.json
+```
+
+
+The simulation environment provides a safe and repeatable way to develop and test your Human-In-the-Loop reinforcement learning components before deploying to real robots.
+
+Congrats 🎉, you have finished this tutorial!
+
+> [!TIP]
+> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
+
+Paper citation:
+
+```
+@article{luo2024precise,
+ title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning},
+ author={Luo, Jianlan and Xu, Charles and Wu, Jeffrey and Levine, Sergey},
+ journal={arXiv preprint arXiv:2410.21845},
+ year={2024}
+}
+```
diff --git a/docs/source/hope_jr.mdx b/docs/source/hope_jr.mdx
new file mode 120000
index 0000000000..402422634e
--- /dev/null
+++ b/docs/source/hope_jr.mdx
@@ -0,0 +1 @@
+../../src/lerobot/robots/hope_jr/hope_jr.mdx
\ No newline at end of file
diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx
new file mode 100644
index 0000000000..b18adb8f49
--- /dev/null
+++ b/docs/source/il_robots.mdx
@@ -0,0 +1,591 @@
+# Imitation Learning on Real-World Robots
+
+This tutorial will explain how to train a neural network to control a real robot autonomously.
+
+**You'll learn:**
+
+1. How to record and visualize your dataset.
+2. How to train a policy using your data and prepare it for evaluation.
+3. How to evaluate your policy and visualize the results.
+
+By following these steps, you'll be able to replicate tasks such as picking up a Lego block and placing it in a bin with a high success rate, as shown in the video below.
+
+
+Video: pickup lego block task
+
+
+
+
+
+
+
+This tutorial isn’t tied to a specific robot: we walk you through the commands and API snippets you can adapt for any supported platform.
+
+During data collection, you'll use a teleoperation device, such as a leader arm or keyboard, to teleoperate the robot and record its motion trajectories.
+
+Once you’ve gathered enough trajectories, you’ll train a neural network to imitate these trajectories and deploy the trained model so your robot can perform the task autonomously.
+
+If you run into any issues at any point, jump into our [Discord community](https://discord.com/invite/s3KuuzsPFb) for support.
+
+## Set up and Calibrate
+
+If you haven't yet set up and calibrated your robot and teleop device, please do so by following the robot-specific tutorial.
+
+## Teleoperate
+
+In this example, we’ll demonstrate how to teleoperate the SO101 robot. For each command, we also provide a corresponding API example.
+
+Note that the `id` associated with a robot is used to store the calibration file. It's important to use the same `id` across teleoperation, recording, and evaluation when using the same setup.
+
+
+
+```bash
+python -m lerobot.teleoperate \
+ --robot.type=so101_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.id=my_awesome_follower_arm \
+ --teleop.type=so101_leader \
+ --teleop.port=/dev/tty.usbmodem58760431551 \
+ --teleop.id=my_awesome_leader_arm
+```
+
+
+
+
+```python
+from lerobot.teleoperators.so101_leader import SO101LeaderConfig, SO101Leader
+from lerobot.robots.so101_follower import SO101FollowerConfig, SO101Follower
+
+robot_config = SO101FollowerConfig(
+ port="/dev/tty.usbmodem58760431541",
+ id="my_red_robot_arm",
+)
+
+teleop_config = SO101LeaderConfig(
+ port="/dev/tty.usbmodem58760431551",
+ id="my_blue_leader_arm",
+)
+
+robot = SO101Follower(robot_config)
+teleop_device = SO101Leader(teleop_config)
+robot.connect()
+teleop_device.connect()
+
+while True:
+ action = teleop_device.get_action()
+ robot.send_action(action)
+```
+
+
+
+
+
+The teleoperate command will automatically:
+
+1. Identify any missing calibrations and initiate the calibration procedure.
+2. Connect the robot and teleop device and start teleoperation.
+
+## Cameras
+
+To add cameras to your setup, follow this [Guide](./cameras#setup-cameras).
+
+## Teleoperate with cameras
+
+With `rerun`, you can teleoperate again while simultaneously visualizing the camera feeds and joint positions. In this example, we’re using the Koch arm.
+
+
+
+```bash
+python -m lerobot.teleoperate \
+ --robot.type=koch_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.id=my_awesome_follower_arm \
+ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
+ --teleop.type=koch_leader \
+ --teleop.port=/dev/tty.usbmodem58760431551 \
+ --teleop.id=my_awesome_leader_arm \
+ --display_data=true
+```
+
+
+
+
+```python
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader
+from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower
+
+camera_config = {
+ "front": OpenCVCameraConfig(index_or_path=0, width=1920, height=1080, fps=30)
+}
+
+robot_config = KochFollowerConfig(
+ port="/dev/tty.usbmodem585A0076841",
+ id="my_red_robot_arm",
+ cameras=camera_config
+)
+
+teleop_config = KochLeaderConfig(
+ port="/dev/tty.usbmodem58760431551",
+ id="my_blue_leader_arm",
+)
+
+robot = KochFollower(robot_config)
+teleop_device = KochLeader(teleop_config)
+robot.connect()
+teleop_device.connect()
+
+while True:
+ observation = robot.get_observation()
+ action = teleop_device.get_action()
+ robot.send_action(action)
+```
+
+
+
+
+
+## Record a dataset
+
+Once you're familiar with teleoperation, you can record your first dataset.
+
+We use the Hugging Face Hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can log in via the CLI using a write-access token; this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).
+
+Add your token to the CLI by running this command:
+
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Then store your Hugging Face repository name in a variable:
+
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Now you can record a dataset. To record 5 episodes and upload your dataset to the hub, adapt the code below for your robot and execute the command or API example.
+
+
+
+```bash
+python -m lerobot.record \
+ --robot.type=so101_follower \
+ --robot.port=/dev/tty.usbmodem585A0076841 \
+ --robot.id=my_awesome_follower_arm \
+ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
+ --teleop.type=so101_leader \
+ --teleop.port=/dev/tty.usbmodem58760431551 \
+ --teleop.id=my_awesome_leader_arm \
+ --display_data=true \
+ --dataset.repo_id=${HF_USER}/record-test \
+ --dataset.num_episodes=5 \
+ --dataset.single_task="Grab the black cube"
+```
+
+
+
+
+```python
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.utils import hw_to_dataset_features
+from lerobot.robots.so100_follower import SO100Follower, SO100FollowerConfig
+from lerobot.teleoperators.so100_leader.config_so100_leader import SO100LeaderConfig
+from lerobot.teleoperators.so100_leader.so100_leader import SO100Leader
+from lerobot.utils.control_utils import init_keyboard_listener
+from lerobot.utils.utils import log_say
+from lerobot.utils.visualization_utils import _init_rerun
+from lerobot.record import record_loop
+
+NUM_EPISODES = 5
+FPS = 30
+EPISODE_TIME_SEC = 60
+RESET_TIME_SEC = 10
+TASK_DESCRIPTION = "My task description"
+
+# Create the robot and teleoperator configurations
+camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
+robot_config = SO100FollowerConfig(
+ port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config
+)
+teleop_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm")
+
+# Initialize the robot and teleoperator
+robot = SO100Follower(robot_config)
+teleop = SO100Leader(teleop_config)
+
+# Configure the dataset features
+action_features = hw_to_dataset_features(robot.action_features, "action")
+obs_features = hw_to_dataset_features(robot.observation_features, "observation")
+dataset_features = {**action_features, **obs_features}
+
+# Create the dataset
+dataset = LeRobotDataset.create(
+    repo_id="<hf_username>/<dataset_repo_id>",
+ fps=FPS,
+ features=dataset_features,
+ robot_type=robot.name,
+ use_videos=True,
+ image_writer_threads=4,
+)
+
+# Initialize the keyboard listener and rerun visualization
+_, events = init_keyboard_listener()
+_init_rerun(session_name="recording")
+
+# Connect the robot and teleoperator
+robot.connect()
+teleop.connect()
+
+episode_idx = 0
+while episode_idx < NUM_EPISODES and not events["stop_recording"]:
+ log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}")
+
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=FPS,
+ teleop=teleop,
+ dataset=dataset,
+ control_time_s=EPISODE_TIME_SEC,
+ single_task=TASK_DESCRIPTION,
+ display_data=True,
+ )
+
+ # Reset the environment if not stopping or re-recording
+ if not events["stop_recording"] and (episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]):
+ log_say("Reset the environment")
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=FPS,
+ teleop=teleop,
+ control_time_s=RESET_TIME_SEC,
+ single_task=TASK_DESCRIPTION,
+ display_data=True,
+ )
+
+ if events["rerecord_episode"]:
+ log_say("Re-recording episode")
+ events["rerecord_episode"] = False
+ events["exit_early"] = False
+ dataset.clear_episode_buffer()
+ continue
+
+ dataset.save_episode()
+ episode_idx += 1
+
+# Clean up
+log_say("Stop recording")
+robot.disconnect()
+teleop.disconnect()
+dataset.push_to_hub()
+```
+
+
+
+
+
+#### Dataset upload
+
+Locally, your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}`. At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/so101_test), whose URL you can obtain by running:
+
+```bash
+echo https://huggingface.co/datasets/${HF_USER}/so101_test
+```
+
+Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (for example `tutorial`).
+
+You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).
+
+You can also push your local dataset to the Hub manually, running:
+
+```bash
+huggingface-cli upload ${HF_USER}/record-test ~/.cache/huggingface/lerobot/{repo-id} --repo-type dataset
+```
+
+#### Record function
+
+The `record` function provides a suite of tools for capturing and managing data during robot operation:
+
+##### 1. Data Storage
+
+- Data is stored in the `LeRobotDataset` format and written to disk during recording.
+- By default, the dataset is pushed to your Hugging Face page after recording.
+ - To disable uploading, use `--dataset.push_to_hub=False`.
+
+##### 2. Checkpointing and Resuming
+
+- Checkpoints are automatically created during recording.
+- If an issue occurs, you can resume by re-running the same command with `--resume=true`.
+- To start recording from scratch, **manually delete** the dataset directory.
+
+##### 3. Recording Parameters
+
+Set the flow of data recording using command-line arguments:
+
+- `--dataset.episode_time_s=60`
+ Duration of each data recording episode (default: **60 seconds**).
+- `--dataset.reset_time_s=60`
+ Duration for resetting the environment after each episode (default: **60 seconds**).
+- `--dataset.num_episodes=50`
+ Total number of episodes to record (default: **50**).
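+
+For example, a sketch of recording 20 shorter episodes with a quick reset phase, reusing the SO101 setup from above (cameras omitted for brevity; the values here are illustrative):
+
+```bash
+python -m lerobot.record \
+    --robot.type=so101_follower \
+    --robot.port=/dev/tty.usbmodem585A0076841 \
+    --robot.id=my_awesome_follower_arm \
+    --teleop.type=so101_leader \
+    --teleop.port=/dev/tty.usbmodem58760431551 \
+    --teleop.id=my_awesome_leader_arm \
+    --dataset.repo_id=${HF_USER}/record-test \
+    --dataset.single_task="Grab the black cube" \
+    --dataset.episode_time_s=30 \
+    --dataset.reset_time_s=15 \
+    --dataset.num_episodes=20
+```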
+
+##### 4. Keyboard Controls During Recording
+
+Control the data recording flow using keyboard shortcuts:
+
+- Press **Right Arrow (`→`)**: End the current episode (or the reset phase) early and move on to the next.
+- Press **Left Arrow (`←`)**: Cancel the current episode and re-record it.
+- Press **Escape (`ESC`)**: Immediately stop the session, encode videos, and upload the dataset.
+
+#### Tips for gathering data
+
+Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible in the cameras. A good rule of thumb is that you should be able to do the task yourself by looking only at the camera images.
+
+In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
+
+Avoid adding too much variation too quickly, as it may hinder your results.
+
+If you want to dive deeper into this important topic, you can check out the [blog post](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset) we wrote on what makes a good dataset.
+
+#### Troubleshooting
+
+- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
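+
+  For example, on a local desktop session you could check and set it like this (`:0` is a common value, but yours may differ):
+
+  ```bash
+  echo $DISPLAY   # if this prints nothing, pynput cannot grab the keyboard
+  export DISPLAY=:0
+  ```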
+
+## Visualize a dataset
+
+If you uploaded your dataset to the hub (the default, unless you disabled it with `--dataset.push_to_hub=False`), you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id, given by:
+
+```bash
+echo ${HF_USER}/so101_test
+```
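+
+You can also inspect the dataset locally with the Python API (a quick sketch; the same attributes are used in the replay example below):
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+# Replace with your own repo id, e.g. the value printed by the command above
+dataset = LeRobotDataset("<hf_username>/so101_test")
+print(dataset.num_episodes, dataset.num_frames, dataset.fps)
+print(dataset.features)
+```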
+
+## Replay an episode
+
+A useful feature is the `replay` function, which allows you to replay any episode that you've recorded or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.
+
+You can replay the first episode on your robot with either the command below or with the API example:
+
+
+
+```bash
+python -m lerobot.replay \
+ --robot.type=so101_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.id=my_awesome_follower_arm \
+ --dataset.repo_id=${HF_USER}/record-test \
+ --dataset.episode=0 # choose the episode you want to replay
+```
+
+
+
+
+```python
+import time
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
+from lerobot.robots.so100_follower.so100_follower import SO100Follower
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.utils import log_say
+
+episode_idx = 0
+
+robot_config = SO100FollowerConfig(port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm")
+
+robot = SO100Follower(robot_config)
+robot.connect()
+
+dataset = LeRobotDataset("<hf_username>/<dataset_repo_id>", episodes=[episode_idx])
+actions = dataset.hf_dataset.select_columns("action")
+
+log_say(f"Replaying episode {episode_idx}")
+for idx in range(dataset.num_frames):
+ t0 = time.perf_counter()
+
+ action = {
+ name: float(actions[idx]["action"][i]) for i, name in enumerate(dataset.features["action"]["names"])
+ }
+ robot.send_action(action)
+
+ busy_wait(1.0 / dataset.fps - (time.perf_counter() - t0))
+
+robot.disconnect()
+```
+
+
+
+
+
+Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).
+
+## Train a policy
+
+To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
+
+```bash
+python -m lerobot.scripts.train \
+ --dataset.repo_id=${HF_USER}/so101_test \
+ --policy.type=act \
+ --output_dir=outputs/train/act_so101_test \
+ --job_name=act_so101_test \
+ --policy.device=cuda \
+ --wandb.enable=true \
+ --policy.repo_id=${HF_USER}/my_policy
+```
+
+Let's explain the command:
+
+1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`.
+2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../src/lerobot/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
+3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
+4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
+
+Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`.
+
+To resume training from a checkpoint, here is an example command to resume from the `last` checkpoint of the `act_so101_test` run:
+
+```bash
+python -m lerobot.scripts.train \
+ --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \
+ --resume=true
+```
+
+If you do not want to push your model to the hub after training, use `--policy.push_to_hub=false`.
+
+Additionally, you can provide extra `tags`, specify a `license`, or make the model repo `private` by adding, for example: `--policy.private=true --policy.tags=\[ppo,rl\] --policy.license=mit`.
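+
+For example, a training invocation that uploads the model to a private repo with tags and a license might look like this (flag values are illustrative):
+
+```bash
+python -m lerobot.scripts.train \
+  --dataset.repo_id=${HF_USER}/so101_test \
+  --policy.type=act \
+  --output_dir=outputs/train/act_so101_test \
+  --job_name=act_so101_test \
+  --policy.device=cuda \
+  --policy.repo_id=${HF_USER}/my_policy \
+  --policy.private=true \
+  --policy.tags=\[act,tutorial\] \
+  --policy.license=mit
+```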
+
+#### Train using Colab
+
+If your local computer doesn't have a powerful GPU, you could use Google Colab to train your model by following the [ACT training notebook](./notebooks#training-act).
+
+#### Upload policy checkpoints
+
+Once training is done, upload the latest checkpoint with:
+
+```bash
+huggingface-cli upload ${HF_USER}/act_so101_test \
+ outputs/train/act_so101_test/checkpoints/last/pretrained_model
+```
+
+You can also upload intermediate checkpoints with:
+
+```bash
+CKPT=010000
+huggingface-cli upload ${HF_USER}/act_so101_test${CKPT} \
+ outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model
+```
+
+## Run inference and evaluate your policy
+
+You can use the `record` script from [`lerobot/record.py`](https://github.com/huggingface/lerobot/blob/main/lerobot/record.py) with a policy checkpoint as input to run inference and evaluate your policy. For instance, run this command or API example to run inference and record 10 evaluation episodes:
+
+
+
+```bash
+python -m lerobot.record \
+ --robot.type=so100_follower \
+ --robot.port=/dev/ttyACM1 \
+ --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \
+ --robot.id=my_awesome_follower_arm \
+ --display_data=false \
+ --dataset.repo_id=${HF_USER}/eval_so100 \
+ --dataset.single_task="Put lego brick into the transparent box" \
+ # <- Teleop optional if you want to teleoperate in between episodes \
+ # --teleop.type=so100_leader \
+ # --teleop.port=/dev/ttyACM0 \
+ # --teleop.id=my_awesome_leader_arm \
+ --policy.path=${HF_USER}/my_policy
+```
+
+
+
+
+```python
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.utils import hw_to_dataset_features
+from lerobot.policies.act.modeling_act import ACTPolicy
+from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
+from lerobot.robots.so100_follower.so100_follower import SO100Follower
+from lerobot.utils.control_utils import init_keyboard_listener
+from lerobot.utils.utils import log_say
+from lerobot.utils.visualization_utils import _init_rerun
+from lerobot.record import record_loop
+
+NUM_EPISODES = 5
+FPS = 30
+EPISODE_TIME_SEC = 60
+TASK_DESCRIPTION = "My task description"
+
+# Create the robot configuration
+camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
+robot_config = SO100FollowerConfig(
+ port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config
+)
+
+# Initialize the robot
+robot = SO100Follower(robot_config)
+
+# Initialize the policy
+policy = ACTPolicy.from_pretrained("<hf_username>/<model_repo_id>")
+
+# Configure the dataset features
+action_features = hw_to_dataset_features(robot.action_features, "action")
+obs_features = hw_to_dataset_features(robot.observation_features, "observation")
+dataset_features = {**action_features, **obs_features}
+
+# Create the dataset
+dataset = LeRobotDataset.create(
+    repo_id="<hf_username>/eval_<dataset_repo_id>",
+ fps=FPS,
+ features=dataset_features,
+ robot_type=robot.name,
+ use_videos=True,
+ image_writer_threads=4,
+)
+
+# Initialize the keyboard listener and rerun visualization
+_, events = init_keyboard_listener()
+_init_rerun(session_name="recording")
+
+# Connect the robot
+robot.connect()
+
+for episode_idx in range(NUM_EPISODES):
+ log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}")
+
+ # Run the policy inference loop
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=FPS,
+ policy=policy,
+ dataset=dataset,
+ control_time_s=EPISODE_TIME_SEC,
+ single_task=TASK_DESCRIPTION,
+ display_data=True,
+ )
+
+ dataset.save_episode()
+
+# Clean up
+robot.disconnect()
+dataset.push_to_hub()
+```
+
+
+
+
+
+As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
+
+1. There is an additional `--policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`).
+2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`).
diff --git a/docs/source/il_sim.mdx b/docs/source/il_sim.mdx
new file mode 100644
index 0000000000..193b09b1bf
--- /dev/null
+++ b/docs/source/il_sim.mdx
@@ -0,0 +1,172 @@
+# Imitation Learning in Sim
+
+This tutorial will explain how to train a neural network to control a robot in simulation with imitation learning.
+
+**You'll learn:**
+
+1. How to record a dataset in simulation with [gym-hil](https://github.com/huggingface/gym-hil) and visualize the dataset.
+2. How to train a policy using your data.
+3. How to evaluate your policy in simulation and visualize the results.
+
+For the simulation environment we use the same [repo](https://github.com/huggingface/gym-hil) that is also being used by the Human-In-the-Loop (HIL) reinforcement learning algorithm.
+This environment is based on [MuJoCo](https://mujoco.org) and allows you to record datasets in LeRobotDataset format.
+Teleoperation is easiest with a controller like the Logitech F710, but you can also use your keyboard if you are up for the challenge.
+
+## Installation
+
+First, install the `gym_hil` package within the LeRobot environment: go to your LeRobot folder and run this command:
+
+```bash
+pip install -e ".[hilserl]"
+```
+
+## Teleoperate and Record a Dataset
+
+To use `gym_hil` with LeRobot, you need to use a configuration file. An example config file can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/env_config_gym_hil_il.json).
+
+To teleoperate and collect a dataset, we need to modify this config file: add your `repo_id` (e.g. `"repo_id": "il_gym"`), set `"num_episodes": 30`, and make sure `"mode"` is set to `"record"`.
+
+If you do not have an Nvidia GPU, also change the `"device": "cuda"` parameter in the config file (for example to `"mps"` on macOS).
+
+By default, the config file assumes you use a controller. To use your keyboard instead, change the environment specified under `"task"` in the config file and set it to `"PandaPickCubeKeyboard-v0"`.
+
+Then we can run this command to start:
+
+
+
+
+```bash
+python -m lerobot.scripts.rl.gym_manipulator --config_path path/to/env_config_gym_hil_il.json
+```
+
+
+
+
+```bash
+mjpython -m lerobot.scripts.rl.gym_manipulator --config_path path/to/env_config_gym_hil_il.json
+```
+
+
+
+
+Once the scene is rendered, you can teleoperate the robot with the gamepad or keyboard; the gamepad/keyboard controls are listed below.
+
+Note that to teleoperate the robot you have to hold the "Human Take Over Pause Policy" button (`RB`) to enable control!
+
+**Gamepad Controls**
+
+
+
+
+
+ Gamepad button mapping for robot control and episode management
+
+
+**Keyboard controls**
+
+For keyboard controls use the `spacebar` to enable control and the following keys to move the robot:
+
+```bash
+ Arrow keys: Move in X-Y plane
+ Shift and Shift_R: Move in Z axis
+ Right Ctrl and Left Ctrl: Open and close gripper
+ ESC: Exit
+```
+
+## Visualize a dataset
+
+If you uploaded your dataset to the hub you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id.
+
+
+
+
+
+ Dataset visualizer
+
+
+## Train a policy
+
+To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
+
+```bash
+python -m lerobot.scripts.train \
+ --dataset.repo_id=${HF_USER}/il_gym \
+ --policy.type=act \
+ --output_dir=outputs/train/il_sim_test \
+ --job_name=il_sim_test \
+ --policy.device=cuda \
+ --wandb.enable=true
+```
+
+Let's explain the command:
+
+1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/il_gym`.
+2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../src/lerobot/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
+3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
+4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
+
+Training should take several hours; 100k steps (the default) take about 1 hour on an Nvidia A100. You will find checkpoints in `outputs/train/il_sim_test/checkpoints`.
+
+#### Train using Colab
+
+If your local computer doesn't have a powerful GPU, you could use Google Colab to train your model by following the [ACT training notebook](./notebooks#training-act).
+
+#### Upload policy checkpoints
+
+Once training is done, upload the latest checkpoint with:
+
+```bash
+huggingface-cli upload ${HF_USER}/il_sim_test \
+ outputs/train/il_sim_test/checkpoints/last/pretrained_model
+```
+
+You can also upload intermediate checkpoints with:
+
+```bash
+CKPT=010000
+huggingface-cli upload ${HF_USER}/il_sim_test${CKPT} \
+ outputs/train/il_sim_test/checkpoints/${CKPT}/pretrained_model
+```
+
+## Evaluate your policy in Sim
+
+To evaluate your policy, use the config file that can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/eval_config_gym_hil.json).
+
+Make sure to replace the `repo_id` with the dataset you trained on (for example `pepijn223/il_sim_dataset`) and the `pretrained_policy_name_or_path` with your model id (for example `pepijn223/il_sim_model`).
+
+Then you can run this command to visualize your trained policy:
+
+
+
+
+```bash
+python -m lerobot.scripts.rl.eval_policy --config_path=path/to/eval_config_gym_hil.json
+```
+
+
+
+
+```bash
+mjpython -m lerobot.scripts.rl.eval_policy --config_path=path/to/eval_config_gym_hil.json
+```
+
+
+
+
+> [!WARNING]
+> While the main workflow of training ACT in simulation is straightforward, there is significant room for exploring how to set up the task, define the initial state of the environment, and determine the type of data required during collection to learn the most effective policy. If your trained policy doesn't perform well, investigate the quality of the dataset it was trained on using our visualizers, as well as the action values and various hyperparameters related to ACT and the simulation.
+
+Congrats 🎉, you have finished this tutorial. If you want to continue using LeRobot in simulation, follow this [Tutorial on reinforcement learning in sim with HIL-SERL](https://huggingface.co/docs/lerobot/hilserl_sim).
+
+> [!TIP]
+> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
diff --git a/docs/source/index.mdx b/docs/source/index.mdx
new file mode 100644
index 0000000000..a2f919e7d8
--- /dev/null
+++ b/docs/source/index.mdx
@@ -0,0 +1,23 @@
+
+
+# LeRobot
+
+**State-of-the-art machine learning for real-world robotics**
+
+🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier for entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.
+
+🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.
+
+🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started.
+
+🤗 LeRobot hosts pretrained models and datasets on the LeRobot HuggingFace page.
+
+Join the LeRobot community on [Discord](https://discord.gg/s3KuuzsPFb)
diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx
new file mode 100644
index 0000000000..13c3600b4f
--- /dev/null
+++ b/docs/source/installation.mdx
@@ -0,0 +1,89 @@
+# Installation
+
+## Install LeRobot
+
+Currently only available from source.
+
+Download our source code:
+
+```bash
+git clone https://github.com/huggingface/lerobot.git
+cd lerobot
+```
+
+Create a virtual environment with Python 3.10, using [`Miniconda`](https://docs.anaconda.com/miniconda/install/#quick-command-line-install)
+
+```bash
+conda create -y -n lerobot python=3.10
+```
+
+Then activate your conda environment (you have to do this each time you open a shell to use lerobot):
+
+```bash
+conda activate lerobot
+```
+
+When using `miniconda`, install `ffmpeg` in your environment:
+
+```bash
+conda install ffmpeg -c conda-forge
+```
+
+> [!TIP]
+> This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
+>
+> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
+>
+> ```bash
+> conda install ffmpeg=7.1.1 -c conda-forge
+> ```
+>
+> - _[On Linux only]_ If you want to bring your own ffmpeg: Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
+
+Install 🤗 LeRobot:
+
+```bash
+pip install -e .
+```
+
+### Troubleshooting
+
+If you encounter build errors, you may need to install additional dependencies: `cmake`, `build-essential`, and the `ffmpeg` libs.
+To install these on Linux, run:
+
+```bash
+sudo apt-get install cmake build-essential python-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev
+```
+
+For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
+
+## Optional dependencies
+
+LeRobot provides optional extras for specific functionalities. Multiple extras can be combined (e.g., `.[aloha,feetech]`). For all available extras, refer to `pyproject.toml`.
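+
+For example, to install the ALOHA simulation environment together with the Feetech motor SDK:
+
+```bash
+pip install -e ".[aloha,feetech]"
+```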
+
+### Simulations
+
+Install environment packages: `aloha` ([gym-aloha](https://github.com/huggingface/gym-aloha)), `xarm` ([gym-xarm](https://github.com/huggingface/gym-xarm)), or `pusht` ([gym-pusht](https://github.com/huggingface/gym-pusht)).
+Example:
+
+```bash
+pip install -e ".[aloha]" # or "[pusht]" for example
+```
+
+### Motor Control
+
+For Koch v1.1, install the Dynamixel SDK; for SO100/SO101/Moss, install the Feetech SDK.
+
+```bash
+pip install -e ".[feetech]" # or "[dynamixel]" for example
+```
+
+### Experiment Tracking
+
+To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
+
+```bash
+wandb login
+```
+
+You can now assemble your robot if it's not ready yet; look for your robot type on the left. Then follow the link below to use LeRobot with your robot.
diff --git a/docs/source/integrate_hardware.mdx b/docs/source/integrate_hardware.mdx
new file mode 100644
index 0000000000..089126fcb1
--- /dev/null
+++ b/docs/source/integrate_hardware.mdx
@@ -0,0 +1,346 @@
+# Bring Your Own Hardware
+
+This tutorial will explain how to integrate your own robot design into the LeRobot ecosystem and have it access all of our tools (data collection, control pipelines, policy training and inference).
+
+To that end, we provide the [`Robot`](https://github.com/huggingface/lerobot/blob/main/lerobot/robots/robot.py) base class in LeRobot, which specifies a standard interface for physical robot integration. Let's see how to implement it.
+
+## Prerequisites
+
+- Your own robot which exposes a communication interface (e.g. serial, CAN, TCP)
+- A way to read sensor data and send motor commands programmatically, e.g. manufacturer's SDK or API, or your own protocol implementation.
+- LeRobot installed in your environment. Follow our [Installation Guide](./installation).
+
+## Choose your motors
+
+If you're using Feetech or Dynamixel motors, LeRobot provides built-in bus interfaces:
+
+- [`FeetechMotorsBus`](https://github.com/huggingface/lerobot/blob/main/lerobot/motors/feetech/feetech.py) – for controlling Feetech servos
+- [`DynamixelMotorsBus`](https://github.com/huggingface/lerobot/blob/main/lerobot/motors/dynamixel/dynamixel.py) – for controlling Dynamixel servos
+
+Please refer to the [`MotorsBus`](https://github.com/huggingface/lerobot/blob/main/lerobot/motors/motors_bus.py) abstract class to learn about its API.
+For a good example of how it can be used, you can have a look at our own [SO101 follower implementation](https://github.com/huggingface/lerobot/blob/main/lerobot/robots/so101_follower/so101_follower.py)
+
+Use these if compatible. Otherwise, you'll need to find or write a Python interface (not covered in this tutorial):
+
+- Find an existing SDK in Python (or use bindings to C/C++)
+- Or implement a basic communication wrapper (e.g., via pyserial, socket, or CANopen)
+
+You're not alone—many community contributions use custom boards or firmware!
+
+For Feetech and Dynamixel, we currently support these servos:
+
+- Feetech:
+  - STS & SMS series (protocol 0): `sts3215`, `sts3250`, `sm8512bl`
+  - SCS series (protocol 1): `scs0009`
+- Dynamixel (protocol 2.0 only): `xl330-m077`, `xl330-m288`, `xl430-w250`, `xm430-w350`, `xm540-w270`, `xc430-w150`
+
+If you are using Feetech or Dynamixel servos that are not in this list, you can add those in the [Feetech table](https://github.com/huggingface/lerobot/blob/main/lerobot/motors/feetech/tables.py) or [Dynamixel table](https://github.com/huggingface/lerobot/blob/main/lerobot/motors/dynamixel/tables.py). Depending on the model, this will require you to add model-specific information. In most cases though, there shouldn't be a lot of additions to do.
+
+In the next sections, we'll use a `FeetechMotorsBus` as the motors interface for the examples. Replace it and adapt to your motors if necessary.
+
+## Step 1: Subclass the `Robot` Interface
+
+You’ll first need to specify the config class and a string identifier (`name`) for your robot. Any settings you'd like to be able to change easily (e.g. port/address, baudrate) should go in this config class.
+
+Here, we'll add the port name and one camera by default for our robot:
+
+
+```python
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+from lerobot.cameras.opencv import OpenCVCameraConfig
+from lerobot.robots import RobotConfig
+
+
+@RobotConfig.register_subclass("my_cool_robot")
+@dataclass
+class MyCoolRobotConfig(RobotConfig):
+ port: str
+ cameras: dict[str, CameraConfig] = field(
+        default_factory=lambda: {
+ "cam_1": OpenCVCameraConfig(
+ index_or_path=2,
+ fps=30,
+ width=480,
+ height=640,
+ ),
+ }
+ )
+```
+
+
+Have a look at our [Cameras tutorial](./cameras) to understand how to detect and add your camera.
+
+Next, we'll create our actual robot class which inherits from `Robot`. This abstract class defines a contract you must follow for your robot to be usable with the rest of the LeRobot tools.
+
+Here we'll create a simple 5-DoF robot with one camera. It could be a simple arm, but notice that the `Robot` abstract class does not assume anything about your robot's form factor. You can let your imagination run wild when designing new robots!
+
+
+```python
+from lerobot.cameras import make_cameras_from_configs
+from lerobot.motors import Motor, MotorNormMode
+from lerobot.motors.feetech import FeetechMotorsBus
+from lerobot.robots import Robot
+
+class MyCoolRobot(Robot):
+ config_class = MyCoolRobotConfig
+ name = "my_cool_robot"
+
+ def __init__(self, config: MyCoolRobotConfig):
+ super().__init__(config)
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ "joint_1": Motor(1, "sts3250", MotorNormMode.RANGE_M100_100),
+ "joint_2": Motor(2, "sts3215", MotorNormMode.RANGE_M100_100),
+ "joint_3": Motor(3, "sts3215", MotorNormMode.RANGE_M100_100),
+ "joint_4": Motor(4, "sts3215", MotorNormMode.RANGE_M100_100),
+ "joint_5": Motor(5, "sts3215", MotorNormMode.RANGE_M100_100),
+ },
+ calibration=self.calibration,
+ )
+ self.cameras = make_cameras_from_configs(config.cameras)
+```
+
+
+## Step 2: Define Observation and Action Features
+
+These two properties define the _interface contract_ between your robot and tools that consume it (such as data collection or learning pipelines).
+
+> [!WARNING]
+> Note that these properties must be callable even if the robot is not yet connected, so avoid relying on runtime hardware state to define them.
+
+### `observation_features`
+
+This property should return a dictionary describing the structure of sensor outputs from your robot. The keys match what `get_observation()` returns, and the values describe either the shape (for arrays/images) or the type (for simple values).
+
+Example for our 5-DoF arm with one camera:
+
+
+```python
+@property
+def _motors_ft(self) -> dict[str, type]:
+ return {
+ "joint_1.pos": float,
+ "joint_2.pos": float,
+ "joint_3.pos": float,
+ "joint_4.pos": float,
+ "joint_5.pos": float,
+ }
+
+@property
+def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.cameras[cam].height, self.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+@property
+def observation_features(self) -> dict:
+ return {**self._motors_ft, **self._cameras_ft}
+```
+
+
+In this case, observations consist of a simple dict storing each motor's position and a camera image.
+
+### `action_features`
+
+This property describes the commands your robot expects via `send_action()`. Again, keys must match the expected input format, and values define the shape/type of each command.
+
+Here, we simply reuse the joint proprioceptive features (`self._motors_ft`) from `observation_features`: the action sent will simply be the goal position for each motor.
+
+
+```python
+@property
+def action_features(self) -> dict:
+ return self._motors_ft
+```
+
+
+## Step 3: Handle Connection and Disconnection
+
+These methods should handle opening and closing communication with your hardware (e.g. serial ports, CAN interfaces, USB devices, cameras).
+
+### `is_connected`
+
+This property should reflect whether communication with the robot's hardware is established. When this property is `True`, it should be possible to read and write to the hardware using `get_observation()` and `send_action()`.
+
+
+```python
+@property
+def is_connected(self) -> bool:
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+```
+
+
+### `connect()`
+
+This method should establish communication with the hardware. Moreover, if your robot needs calibration and is not calibrated, it should start a calibration procedure by default. If your robot needs some specific configuration, it should also be applied here.
+
+
+```python
+def connect(self, calibrate: bool = True) -> None:
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ for cam in self.cameras.values():
+ cam.connect()
+
+ self.configure()
+```
+
+
+### `disconnect()`
+
+This method should gracefully terminate communication with the hardware: free any related resources (threads or processes), close ports, etc.
+
+Here, we already handle this in our `MotorsBus` and `Camera` classes so we just need to call their own `disconnect()` methods:
+
+
+```python
+def disconnect(self) -> None:
+ self.bus.disconnect()
+ for cam in self.cameras.values():
+ cam.disconnect()
+```
+
+
+## Step 4: Support Calibration and Configuration
+
+LeRobot supports saving and loading calibration data automatically. This is useful for joint offsets, zero positions, or sensor alignment.
+
+> Note that depending on your hardware, this may not apply. If that's the case, you can simply leave these methods as no-ops:
+
+
+```python
+@property
+def is_calibrated(self) -> bool:
+    return True
+
+def calibrate(self) -> None:
+    pass
+```
+
+### `is_calibrated`
+
+This should reflect whether your robot has the required calibration loaded.
+
+```python
+@property
+def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+```
+
+### `calibrate()`
+
+The goal of the calibration is twofold:
+- Know the physical range of motion of each motor in order to only send commands within this range.
+- Normalize raw motor positions to sensible continuous values (e.g. percentages, degrees) instead of arbitrary discrete values that depend on the specific motor used and would not replicate elsewhere.
+
+It should implement the logic for calibration (if relevant) and update the `self.calibration` dictionary. If you are using Feetech or Dynamixel motors, our bus interfaces already include methods to help with this.
+
+
+
+```python
+def calibrate(self) -> None:
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+
+ input(f"Move {self} to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ print(
+ "Move all joints sequentially through their entire ranges "
+ "of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion()
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=0,
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ print("Calibration saved to", self.calibration_fpath)
+```
+
+
+### `configure()`
+
+Use this to set up any configuration for your hardware (servos control modes, controller gains, etc.). This should usually be run at connection time and be idempotent.
+
+
+```python
+def configure(self) -> None:
+ with self.bus.torque_disabled():
+ self.bus.configure_motors()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+ self.bus.write("P_Coefficient", motor, 16)
+ self.bus.write("I_Coefficient", motor, 0)
+ self.bus.write("D_Coefficient", motor, 32)
+```
+
+
+## Step 5: Implement Sensors Reading and Action Sending
+
+These are the most important runtime functions: the core I/O loop.
+
+### `get_observation()`
+
+Returns a dictionary of sensor values from the robot. These typically include motor states, camera frames, various sensors, etc. In the LeRobot framework, these observations are what will be fed to a policy in order to predict the actions to take. The dictionary keys and structure must match `observation_features`.
+
+
+```python
+def get_observation(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise ConnectionError(f"{self} is not connected.")
+
+ # Read arm position
+ obs_dict = self.bus.sync_read("Present_Position")
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ obs_dict[cam_key] = cam.async_read()
+
+ return obs_dict
+```
+
+
+### `send_action()`
+
+Takes a dictionary that matches `action_features`, and sends it to your hardware. You can add safety limits (clipping, smoothing) and return what was actually sent.
+
+For simplicity, we won't be adding any modification of the actions in our example here.
+
+
+```python
+def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items()}
+
+ # Send goal position to the arm
+ self.bus.sync_write("Goal_Position", goal_pos)
+
+ return action
+```
+
+
+## Adding a Teleoperator
+
+For implementing teleoperation devices, we also provide a [`Teleoperator`](https://github.com/huggingface/lerobot/blob/main/lerobot/teleoperators/teleoperator.py) base class. This class is very similar to the `Robot` base class and likewise doesn't assume anything about form factor.
+
+The main differences are in the I/O functions: a teleoperator allows you to produce actions via `get_action` and can receive feedback actions via `send_feedback`. Feedback could be anything controllable on the teleoperation device that helps the person controlling it understand the consequences of the actions sent: think motion/force feedback on a leader arm, or vibrations on a gamepad controller. To implement a teleoperator, you can follow this same tutorial and adapt it for these two methods.
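+
+As a rough sketch only (the import path and the exact set of abstract members are assumptions mirroring the `Robot` example above; check `teleoperator.py` for the real contract), a minimal teleoperator could look like this:
+
+```python
+from typing import Any
+
+from lerobot.teleoperators import Teleoperator  # assumed export, mirroring the `Robot` import pattern
+
+
+class MyCoolTeleop(Teleoperator):
+    name = "my_cool_teleop"
+
+    @property
+    def action_features(self) -> dict:
+        # Same convention as the robot above: one normalized position per joint
+        return {f"joint_{i}.pos": float for i in range(1, 6)}
+
+    def get_action(self) -> dict[str, Any]:
+        # Read your device (encoders, gamepad axes, ...) and map it to the action dict
+        raise NotImplementedError
+
+    def send_feedback(self, feedback: dict[str, Any]) -> None:
+        # Optional: drive force/vibration feedback on the device; no-op if it has no actuators
+        pass
+
+    # connect()/disconnect(), calibration and configuration hooks are omitted here;
+    # implement them the same way as in the `Robot` example above.
+```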
+
+## Wrapping Up
+
+Once your robot class is complete, you can leverage the LeRobot ecosystem:
+
+- Control your robot with the available teleoperators, or integrate your own teleoperation device directly
+- Record training data and visualize it
+- Integrate it into RL or imitation learning pipelines
+
+Don't hesitate to reach out to the community for help on our [Discord](https://discord.gg/s3KuuzsPFb) 🤗
diff --git a/docs/source/koch.mdx b/docs/source/koch.mdx
new file mode 120000
index 0000000000..5383518b3d
--- /dev/null
+++ b/docs/source/koch.mdx
@@ -0,0 +1 @@
+../../src/lerobot/robots/koch_follower/koch.mdx
\ No newline at end of file
diff --git a/docs/source/lekiwi.mdx b/docs/source/lekiwi.mdx
new file mode 120000
index 0000000000..afc43077e4
--- /dev/null
+++ b/docs/source/lekiwi.mdx
@@ -0,0 +1 @@
+../../src/lerobot/robots/lekiwi/lekiwi.mdx
\ No newline at end of file
diff --git a/docs/source/notebooks.mdx b/docs/source/notebooks.mdx
new file mode 100644
index 0000000000..6a9c3b103c
--- /dev/null
+++ b/docs/source/notebooks.mdx
@@ -0,0 +1,29 @@
+# 🤗 LeRobot Notebooks
+
+This repository contains example notebooks for using LeRobot. These notebooks demonstrate how to train standardized policies on real-world or simulation datasets.
+
+---
+
+### Training ACT
+
+[ACT](https://huggingface.co/papers/2304.13705) (Action Chunking Transformer) is a transformer-based policy architecture for imitation learning that processes robot states and camera inputs to generate smooth, chunked action sequences.
+
+We provide a ready-to-run Google Colab notebook to help you train ACT policies using datasets from the Hugging Face Hub, with optional logging to Weights & Biases.
+
+| Notebook | Colab |
+| :------------------------------------------------------------------------------------------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [Train ACT with LeRobot](https://github.com/huggingface/notebooks/blob/main/lerobot/training-act.ipynb) | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-act.ipynb) |
+
+Expected training time for 100k steps: ~1.5 hours on an NVIDIA A100 GPU with batch size of `64`.
+
+### Training SmolVLA
+
+[SmolVLA](https://huggingface.co/papers/2506.01844) is a small but efficient Vision-Language-Action model. It is compact, with 450M parameters, and is developed by Hugging Face.
+
+We provide a ready-to-run Google Colab notebook to help you train SmolVLA policies using datasets from the Hugging Face Hub, with optional logging to Weights & Biases.
+
+| Notebook | Colab |
+| :-------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| [Train SmolVLA with LeRobot](https://github.com/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) |
+
+Expected training time for 20k steps: ~5 hours on an NVIDIA A100 GPU with batch size of `64`.
diff --git a/docs/source/smolvla.mdx b/docs/source/smolvla.mdx
new file mode 100644
index 0000000000..880beaa1ac
--- /dev/null
+++ b/docs/source/smolvla.mdx
@@ -0,0 +1,116 @@
+# Finetune SmolVLA
+
+SmolVLA is Hugging Face’s lightweight foundation model for robotics. Designed for easy fine-tuning on LeRobot datasets, it helps accelerate your development!
+
+
+
+
+
+  Figure 1. SmolVLA takes as input (i) multiple camera views, (ii) the
+ robot’s current sensorimotor state, and (iii) a natural language
+ instruction, encoded into contextual features used to condition the action
+ expert when generating an action chunk.
+
+
+
+## Set Up Your Environment
+
+1. Install LeRobot by following our [Installation Guide](./installation).
+2. Install SmolVLA dependencies by running:
+
+ ```bash
+ pip install -e ".[smolvla]"
+ ```
+
+## Collect a dataset
+
+SmolVLA is a base model, so fine-tuning on your own data is required for optimal performance in your setup.
+We recommend recording ~50 episodes of your task as a starting point. Follow our guide to get started: [Recording a Dataset](https://huggingface.co/docs/lerobot/getting_started_real_world_robot#record-a-dataset)
+
+
+
+In your dataset, make sure to have enough demonstrations for each variation you introduce (e.g. the cube positions on the table in a cube pick-and-place task).
+
+For reference, we recommend checking out the dataset linked below, which was used in the [SmolVLA paper](https://huggingface.co/papers/2506.01844):
+
+🔗 [SVLA SO100 PickPlace](https://huggingface.co/spaces/lerobot/visualize_dataset?path=%2Flerobot%2Fsvla_so100_pickplace%2Fepisode_0)
+
+In this dataset, we recorded 50 episodes across 5 distinct cube positions. For each position, we collected 10 episodes of pick-and-place interactions. This structure, repeating each variation several times, helped the model generalize better. We tried a similar dataset with 25 episodes, and it was not enough, leading to poor performance. So data quality and quantity are definitely key.
+Once your dataset is available on the Hub, you can use our finetuning script to adapt SmolVLA to your application.
+
+
+
+## Finetune SmolVLA on your data
+
+Use [`smolvla_base`](https://hf.co/lerobot/smolvla_base), our pretrained 450M model, and fine-tune it on your data.
+Training the model for 20k steps takes roughly 4 hours on a single A100 GPU. You should tune the number of steps based on performance and your use case.
+
+If you don't have a GPU, you can train using our [Colab notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb).
+
+Pass your dataset to the training script using `--dataset.repo_id`. If you want to test your installation, run the following command where we use one of the datasets we collected for the [SmolVLA Paper](https://huggingface.co/papers/2506.01844).
+
+```bash
+cd lerobot && python -m lerobot.scripts.train \
+ --policy.path=lerobot/smolvla_base \
+ --dataset.repo_id=${HF_USER}/mydataset \
+ --batch_size=64 \
+ --steps=20000 \
+ --output_dir=outputs/train/my_smolvla \
+ --job_name=my_smolvla_training \
+ --policy.device=cuda \
+ --wandb.enable=true
+```
+
+
+ You can start with a small batch size and increase it incrementally, if the
+ GPU allows it, as long as loading times remain short.
+
+
+Fine-tuning is an art. For a complete overview of the options for finetuning, run
+
+```bash
+python -m lerobot.scripts.train --help
+```
+
+
+
+
+
+ Figure 2: Comparison of SmolVLA across task variations. From left to right:
+ (1) pick-place cube counting, (2) pick-place cube counting, (3) pick-place
+ cube counting under perturbations, and (4) generalization on pick-and-place
+ of the lego block with real-world SO101.
+
+
+
+## Evaluate the finetuned model and run it in real-time
+
+As when recording an episode, it is recommended that you are logged in to the Hugging Face Hub. You can follow the corresponding steps: [Record a dataset](./getting_started_real_world_robot#record-a-dataset).
+Once you are logged in, you can run inference in your setup by running:
+
+```bash
+python -m lerobot.record \
+ --robot.type=so101_follower \
+ --robot.port=/dev/ttyACM0 \ # <- Use your port
+ --robot.id=my_blue_follower_arm \ # <- Use your robot id
+ --robot.cameras="{ front: {type: opencv, index_or_path: 8, width: 640, height: 480, fps: 30}}" \ # <- Use your cameras
+ --dataset.single_task="Grasp a lego block and put it in the bin." \ # <- Use the same task description you used in your dataset recording
+ --dataset.repo_id=${HF_USER}/eval_DATASET_NAME_test \ # <- This will be the dataset name on HF Hub
+ --dataset.episode_time_s=50 \
+ --dataset.num_episodes=10 \
+ # <- Teleop optional if you want to teleoperate in between episodes \
+ # --teleop.type=so100_leader \
+ # --teleop.port=/dev/ttyACM0 \
+ # --teleop.id=my_red_leader_arm \
+ --policy.path=HF_USER/FINETUNE_MODEL_NAME # <- Use your fine-tuned model
+```
+
+Depending on your evaluation setup, you can configure the duration and the number of episodes to record for your evaluation suite.
diff --git a/docs/source/so100.mdx b/docs/source/so100.mdx
new file mode 120000
index 0000000000..0a71dc3079
--- /dev/null
+++ b/docs/source/so100.mdx
@@ -0,0 +1 @@
+../../src/lerobot/robots/so100_follower/so100.mdx
\ No newline at end of file
diff --git a/docs/source/so101.mdx b/docs/source/so101.mdx
new file mode 120000
index 0000000000..ab6d0ac61c
--- /dev/null
+++ b/docs/source/so101.mdx
@@ -0,0 +1 @@
+../../src/lerobot/robots/so101_follower/so101.mdx
\ No newline at end of file
diff --git a/examples/10_use_so100.md b/examples/10_use_so100.md
deleted file mode 100644
index 155bbe5198..0000000000
--- a/examples/10_use_so100.md
+++ /dev/null
@@ -1,296 +0,0 @@
-# Using the [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot
-
-
-## A. Source the parts
-
-Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with link to source the parts, as well as the instructions to 3D print the parts, and advices if it's your first time printing or if you don't own a 3D printer already.
-
-**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
-
-## B. Install LeRobot
-
-On your computer:
-
-1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
-```bash
-mkdir -p ~/miniconda3
-# Linux:
-wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
-# Mac M-series:
-# curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o ~/miniconda3/miniconda.sh
-# Mac Intel:
-# curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o ~/miniconda3/miniconda.sh
-bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
-rm ~/miniconda3/miniconda.sh
-~/miniconda3/bin/conda init bash
-```
-
-2. Restart shell or `source ~/.bashrc` (*Mac*: `source ~/.bash_profile`) or `source ~/.zshrc` if you're using zshell
-
-3. Create and activate a fresh conda environment for lerobot
-```bash
-conda create -y -n lerobot python=3.10 && conda activate lerobot
-```
-
-4. Clone LeRobot:
-```bash
-git clone https://github.com/huggingface/lerobot.git ~/lerobot
-```
-
-5. Install LeRobot with dependencies for the feetech motors:
-```bash
-cd ~/lerobot && pip install -e ".[feetech]"
-```
-
-*For Linux only (not Mac)*: install extra dependencies for recording datasets:
-```bash
-conda install -y -c conda-forge ffmpeg
-pip uninstall -y opencv-python
-conda install -y -c conda-forge "opencv>=4.10.0"
-```
-
-## C. Configure the motors
-
-### 1. Find the USB ports associated to each arm
-
-Designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm.
-
-#### a. Run the script to find ports
-
-Follow Step 1 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I), which illustrates the use of our scripts below.
-
-To find the port for each bus servo adapter, run the utility script:
-```bash
-python lerobot/scripts/find_motors_bus_port.py
-```
-
-#### b. Example outputs
-
-Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
-```
-Finding all available ports for the MotorBus.
-['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
-
-[...Disconnect leader arm and press Enter...]
-
-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
-Reconnect the usb cable.
-```
-Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
-```
-Finding all available ports for the MotorBus.
-['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
-
-[...Disconnect follower arm and press Enter...]
-
-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
-Reconnect the usb cable.
-```
-
-#### c. Troubleshooting
-On Linux, you might need to give access to the USB ports by running:
-```bash
-sudo chmod 666 /dev/ttyACM0
-sudo chmod 666 /dev/ttyACM1
-```
-
-#### d. Update YAML file
-
-Now that you have the ports, modify the *port* sections in `so100.yaml`
-
-### 2. Configure the motors
-
-#### a. Set IDs for all 12 motors
-Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
-```bash
-python lerobot/scripts/configure_motor.py \
- --port /dev/tty.usbmodem58760432961 \
- --brand feetech \
- --model sts3215 \
- --baudrate 1000000 \
- --ID 1
-```
-
-*Note: These motors are currently limitated. They can take values between 0 and 4096 only, which corresponds to a full turn. They can't turn more than that. 2048 is at the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift up to ± 2048 steps (± 180 degrees).*
-
-Then unplug your motor and plug the second motor and set its ID to 2.
-```bash
-python lerobot/scripts/configure_motor.py \
- --port /dev/tty.usbmodem58760432961 \
- --brand feetech \
- --model sts3215 \
- --baudrate 1000000 \
- --ID 2
-```
-
-Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.
-
-
-#### b. Remove the gears of the 6 leader motors
-
-Follow step 2 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=248). You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
-
-#### c. Add motor horn to all 12 motors
-Follow step 3 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=569). For SO-100, you need to align the holes on the motor horn with the motor spline at approximately the 1:30, 4:30, 7:30 and 10:30 positions.
-Try to avoid rotating the motor while doing so, to keep position 2048 set during configuration. This is especially tricky for the leader motors, as they are more sensitive without the gears, but it's fine if the position shifts a little.
-
-## D. Assemble the arms
-
-Follow step 4 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=610). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.
-
-## E. Calibrate
-
-Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.
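-
-To make this concrete, here is a simplified, illustrative Python sketch (not the actual LeRobot calibration code; the function and the exact convention are assumptions) of how calibration can map raw motor steps to a shared degree convention, so that two arms in the same pose report the same value:
-```python
-def calibrated_degrees(raw_steps: int, homing_offset: int, drive_mode: int) -> float:
-    """Map a raw position (in 0..4096 steps) to degrees in a convention shared across arms."""
-    # drive_mode flips the counting direction so both arms count rotation the same way.
-    sign = -1 if drive_mode else 1
-    return sign * (raw_steps + homing_offset) * 360.0 / 4096
-
-
-# Two arms in the same physical pose, but with different raw readings and offsets,
-# end up with the same calibrated angle:
-print(calibrated_degrees(2000, 48, 0))   # 180.0
-print(calibrated_degrees(1900, 148, 0))  # 180.0
-```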
-
-#### a. Manual calibration of follower arm
-/!\ Contrary to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724), which illustrates auto calibration, we will do manual calibration of the follower arm for now.
-
-You will need to move the follower arm to these positions sequentially:
-
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
-| | | |
-
-Make sure both arms are connected and run this script to launch manual calibration:
-```bash
-python lerobot/scripts/control_robot.py calibrate \
- --robot-path lerobot/configs/robot/so100.yaml \
- --robot-overrides '~cameras' --arms main_follower
-```
-
-#### b. Manual calibration of leader arm
-Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
-
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
-| | | |
-
-Run this script to launch manual calibration:
-```bash
-python lerobot/scripts/control_robot.py calibrate \
- --robot-path lerobot/configs/robot/so100.yaml \
- --robot-overrides '~cameras' --arms main_leader
-```
-
-## F. Teleoperate
-
-**Simple teleop**
-Then you are ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/so100.yaml \
- --robot-overrides '~cameras' \
- --display-cameras 0
-```
-
-
-#### a. Teleop with displaying cameras
-Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following command. This is useful for preparing your setup before recording your first dataset.
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/so100.yaml
-```
-
-## G. Record a dataset
-
-Once you're familiar with teleoperation, you can record your first dataset with SO-100.
-
-If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
-```bash
-huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
-```
-
-Store your Hugging Face repository name in a variable to run these commands:
-```bash
-HF_USER=$(huggingface-cli whoami | head -n 1)
-echo $HF_USER
-```
-
-Record 2 episodes and upload your dataset to the hub:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/so100.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/so100_test \
- --tags so100 tutorial \
- --warmup-time-s 5 \
- --episode-time-s 40 \
- --reset-time-s 10 \
- --num-episodes 2 \
- --push-to-hub 1
-```
-
-## H. Visualize a dataset
-
-If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
-```bash
-echo ${HF_USER}/so100_test
-```
-
-If you didn't upload to the hub (i.e. you used `--push-to-hub 0`), you can also visualize your dataset locally with:
-```bash
-python lerobot/scripts/visualize_dataset_html.py \
- --repo-id ${HF_USER}/so100_test
-```
-
-## I. Replay an episode
-
-Now try to replay the first episode on your robot:
-```bash
-python lerobot/scripts/control_robot.py replay \
- --robot-path lerobot/configs/robot/so100.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/so100_test \
- --episode 0
-```
-
-## J. Train a policy
-
-To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
-```bash
-python lerobot/scripts/train.py \
- dataset_repo_id=${HF_USER}/so100_test \
- policy=act_so100_real \
- env=so100_real \
- hydra.run.dir=outputs/train/act_so100_test \
- hydra.job.name=act_so100_test \
- device=cuda \
- wandb.enable=true
-```
-
-Let's explain it:
-1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/so100_test`.
-2. We provided the policy with `policy=act_so100_real`. This loads configurations from [`lerobot/configs/policy/act_so100_real.yaml`](../lerobot/configs/policy/act_so100_real.yaml). Importantly, this policy uses 2 cameras as input: `laptop` and `phone`.
-3. We provided an environment as argument with `env=so100_real`. This loads configurations from [`lerobot/configs/env/so100_real.yaml`](../lerobot/configs/env/so100_real.yaml).
-4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
-5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
-
-Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.
-
-## K. Evaluate your policy
-
-You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/so100.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/eval_act_so100_test \
- --tags so100 tutorial eval \
- --warmup-time-s 5 \
- --episode-time-s 40 \
- --reset-time-s 10 \
- --num-episodes 10 \
- -p outputs/train/act_so100_test/checkpoints/last/pretrained_model
-```
-
-As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
-1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_so100_test`).
-2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_so100_test`).
-
-## L. More Information
-
-Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth walkthrough of controlling real robots with LeRobot.
-
-If you have any questions or need help, please reach out on Discord in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039).
diff --git a/examples/11_use_moss.md b/examples/11_use_moss.md
deleted file mode 100644
index 55d6fcaf94..0000000000
--- a/examples/11_use_moss.md
+++ /dev/null
@@ -1,275 +0,0 @@
-This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-robot-arms) with LeRobot.
-
-## Source the parts
-
-Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials, with links to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or you don't yet own a 3D printer.
-
-**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
-
-## Install LeRobot
-
-On your computer:
-
-1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
-```bash
-mkdir -p ~/miniconda3
-wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
-bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
-rm ~/miniconda3/miniconda.sh
-~/miniconda3/bin/conda init bash
-```
-
-2. Restart shell or `source ~/.bashrc`
-
-3. Create and activate a fresh conda environment for lerobot
-```bash
-conda create -y -n lerobot python=3.10 && conda activate lerobot
-```
-
-4. Clone LeRobot:
-```bash
-git clone https://github.com/huggingface/lerobot.git ~/lerobot
-```
-
-5. Install LeRobot with dependencies for the feetech motors:
-```bash
-cd ~/lerobot && pip install -e ".[feetech]"
-```
-
-For Linux only (not Mac), install extra dependencies for recording datasets:
-```bash
-conda install -y -c conda-forge ffmpeg
-pip uninstall -y opencv-python
-conda install -y -c conda-forge "opencv>=4.10.0"
-```
-
-## Configure the motors
-
-Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the use of our scripts below.
-
-**Find USB ports associated to your arms**
-To find the correct ports for each arm, run the utility script twice:
-```bash
-python lerobot/scripts/find_motors_bus_port.py
-```
-
-Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
-```
-Finding all available ports for the MotorBus.
-['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
-
-[...Disconnect leader arm and press Enter...]
-
-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
-Reconnect the usb cable.
-```
-
-Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
-```
-Finding all available ports for the MotorBus.
-['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
-
-[...Disconnect follower arm and press Enter...]
-
-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
-Reconnect the usb cable.
-```
-
-Troubleshooting: On Linux, you might need to give access to the USB ports by running:
-```bash
-sudo chmod 666 /dev/ttyACM0
-sudo chmod 666 /dev/ttyACM1
-```
-
-**Configure your motors**
-Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
-```bash
-python lerobot/scripts/configure_motor.py \
- --port /dev/tty.usbmodem58760432961 \
- --brand feetech \
- --model sts3215 \
- --baudrate 1000000 \
- --ID 1
-```
-
-Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to one full turn; they cannot rotate beyond that. 2048 is the middle of this range, so we can move -2048 steps (180 degrees anticlockwise) to reach one end of the range, or +2048 steps (180 degrees clockwise) to reach the other end. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can later adjust the homing offset to compensate for a shift of up to ± 2048 steps (± 180 degrees).
-
-Then unplug your motor and plug the second motor and set its ID to 2.
-```bash
-python lerobot/scripts/configure_motor.py \
- --port /dev/tty.usbmodem58760432961 \
- --brand feetech \
- --model sts3215 \
- --baudrate 1000000 \
- --ID 2
-```
-
-Repeat the process for your remaining motors until you reach ID 6. Then do the same for the 6 motors of the leader arm.
-
-**Remove the gears of the 6 leader motors**
-Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). You need to remove the gears from the motors of the leader arm. As a result, only the motor's position encoder is used, and the reduced friction makes the leader arm easier to operate.
-
-**Add motor horn to the motors**
-Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). For Moss v1, you need to align the holes on the motor horn with the motor spline at approximately the 3, 6, 9 and 12 o'clock positions.
-Try to avoid rotating the motor while doing so, to keep position 2048 set during configuration. This is especially tricky for the leader motors, as they are more sensitive without the gears, but it's fine if the position shifts a little.
-
-## Assemble the arms
-
-Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.
-
-## Calibrate
-
-Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Moss v1 robot to work on another.
-
-**Manual calibration of follower arm**
-/!\ Contrary to step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates auto calibration, we will do manual calibration of the follower arm for now.
-
-You will need to move the follower arm to these positions sequentially:
-
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
-| | | |
-
-Make sure both arms are connected and run this script to launch manual calibration:
-```bash
-python lerobot/scripts/control_robot.py calibrate \
- --robot-path lerobot/configs/robot/moss.yaml \
- --robot-overrides '~cameras' --arms main_follower
-```
-
-**Manual calibration of leader arm**
-Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
-
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
-| | | |
-
-Run this script to launch manual calibration:
-```bash
-python lerobot/scripts/control_robot.py calibrate \
- --robot-path lerobot/configs/robot/moss.yaml \
- --robot-overrides '~cameras' --arms main_leader
-```
-
-## Teleoperate
-
-**Simple teleop**
-Then you are ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/moss.yaml \
- --robot-overrides '~cameras' \
- --display-cameras 0
-```
-
-
-**Teleop with displaying cameras**
-Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following command. This is useful for preparing your setup before recording your first dataset.
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/moss.yaml
-```
-
-## Record a dataset
-
-Once you're familiar with teleoperation, you can record your first dataset with Moss v1.
-
-If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
-```bash
-huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
-```
-
-Store your Hugging Face repository name in a variable to run these commands:
-```bash
-HF_USER=$(huggingface-cli whoami | head -n 1)
-echo $HF_USER
-```
-
-Record 2 episodes and upload your dataset to the hub:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/moss.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/moss_test \
- --tags moss tutorial \
- --warmup-time-s 5 \
- --episode-time-s 40 \
- --reset-time-s 10 \
- --num-episodes 2 \
- --push-to-hub 1
-```
-
-## Visualize a dataset
-
-If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
-```bash
-echo ${HF_USER}/moss_test
-```
-
-If you didn't upload to the hub (i.e. you used `--push-to-hub 0`), you can also visualize your dataset locally with:
-```bash
-python lerobot/scripts/visualize_dataset_html.py \
- --repo-id ${HF_USER}/moss_test
-```
-
-## Replay an episode
-
-Now try to replay the first episode on your robot:
-```bash
-python lerobot/scripts/control_robot.py replay \
- --robot-path lerobot/configs/robot/moss.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/moss_test \
- --episode 0
-```
-
-## Train a policy
-
-To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
-```bash
-python lerobot/scripts/train.py \
- dataset_repo_id=${HF_USER}/moss_test \
- policy=act_moss_real \
- env=moss_real \
- hydra.run.dir=outputs/train/act_moss_test \
- hydra.job.name=act_moss_test \
- device=cuda \
- wandb.enable=true
-```
-
-Let's explain it:
-1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/moss_test`.
-2. We provided the policy with `policy=act_moss_real`. This loads configurations from [`lerobot/configs/policy/act_moss_real.yaml`](../lerobot/configs/policy/act_moss_real.yaml). Importantly, this policy uses 2 cameras as input: `laptop` and `phone`.
-3. We provided an environment as argument with `env=moss_real`. This loads configurations from [`lerobot/configs/env/moss_real.yaml`](../lerobot/configs/env/moss_real.yaml).
-4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
-5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
-
-Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.
-
-## Evaluate your policy
-
-You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/moss.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/eval_act_moss_test \
- --tags moss tutorial eval \
- --warmup-time-s 5 \
- --episode-time-s 40 \
- --reset-time-s 10 \
- --num-episodes 10 \
- -p outputs/train/act_moss_test/checkpoints/last/pretrained_model
-```
-
-As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
-1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_moss_test`).
-2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_moss_test`).
-
-## More
-
-Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth walkthrough of controlling real robots with LeRobot.
-
-If you have any questions or need help, please reach out on Discord in the channel [`#moss-arm`](https://discord.com/channels/1216765309076115607/1275374638985252925).
diff --git a/examples/1_load_lerobot_dataset.py b/examples/1_load_lerobot_dataset.py
index 96c104b68f..3d357dd19c 100644
--- a/examples/1_load_lerobot_dataset.py
+++ b/examples/1_load_lerobot_dataset.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
This script demonstrates the use of `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face.
It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch.
@@ -18,7 +32,7 @@
from huggingface_hub import HfApi
import lerobot
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
+from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
# We ported a number of existing datasets ourselves, use this to see the list:
print("List of available datasets:")
@@ -105,7 +119,7 @@
delta_timestamps = {
# loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame
camera_key: [-1, -0.5, -0.20, 0],
- # loads 8 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame
+ # loads 6 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame
"observation.state": [-1.5, -1, -0.5, -0.20, -0.10, 0],
# loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future
"action": [t / dataset.fps for t in range(64)],
@@ -129,6 +143,6 @@
for batch in dataloader:
print(f"{batch[camera_key].shape=}") # (32, 4, c, h, w)
- print(f"{batch['observation.state'].shape=}") # (32, 5, c)
+ print(f"{batch['observation.state'].shape=}") # (32, 6, c)
print(f"{batch['action'].shape=}") # (32, 64, c)
break
diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py
index b2fe1dba18..c0c7845e80 100644
--- a/examples/2_evaluate_pretrained_policy.py
+++ b/examples/2_evaluate_pretrained_policy.py
@@ -1,6 +1,25 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
-This scripts demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
+This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.
+
+It requires the installation of the 'gym_pusht' simulation environment. Install it by running:
+```bash
+pip install -e ".[pusht]"
+```
"""
from pathlib import Path
@@ -10,33 +29,22 @@
import imageio
import numpy
import torch
-from huggingface_hub import snapshot_download
-from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
+from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
# Create a directory to store the video of the evaluation
output_directory = Path("outputs/eval/example_pusht_diffusion")
output_directory.mkdir(parents=True, exist_ok=True)
-# Download the diffusion policy for pusht environment
-pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht"))
-# OR uncomment the following to evaluate a policy from the local outputs/train folder.
+# Select your device
+device = "cuda"
+
+# Provide the [hugging face repo id](https://huggingface.co/lerobot/diffusion_pusht):
+pretrained_policy_path = "lerobot/diffusion_pusht"
+# OR a path to a local outputs/train folder.
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")
policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
-policy.eval()
-
-# Check if GPU is available
-if torch.cuda.is_available():
- device = torch.device("cuda")
- print("GPU is available. Device set to:", device)
-else:
- device = torch.device("cpu")
- print(f"GPU is not available. Device set to: {device}. Inference will be slower than on GPU.")
- # Decrease the number of reverse-diffusion steps (trades off a bit of quality for 10x speed)
- policy.diffusion.num_inference_steps = 10
-
-policy.to(device)
# Initialize evaluation environment to render two observation types:
# an image of the scene and state/position of the agent. The environment
@@ -47,7 +55,17 @@
max_episode_steps=300,
)
-# Reset the policy and environmens to prepare for rollout
+# We can verify that the shapes of the features expected by the policy match the ones from the observations
+# produced by the environment
+print(policy.config.input_features)
+print(env.observation_space)
+
+# Similarly, we can check that the actions produced by the policy will match the actions expected by the
+# environment
+print(policy.config.output_features)
+print(env.action_space)
+
+# Reset the policy and environments to prepare for rollout
policy.reset()
numpy_observation, info = env.reset(seed=42)
@@ -101,7 +119,7 @@
rewards.append(reward)
frames.append(env.render())
- # The rollout is considered done when the success state is reach (i.e. terminated is True),
+ # The rollout is considered done when the success state is reached (i.e. terminated is True),
# or the maximum number of iterations is reached (i.e. truncated is True)
done = terminated | truncated | done
step += 1
diff --git a/examples/3_train_policy.py b/examples/3_train_policy.py
index 935ab2dbff..f2de79db89 100644
--- a/examples/3_train_policy.py
+++ b/examples/3_train_policy.py
@@ -1,4 +1,18 @@
-"""This scripts demonstrates how to train Diffusion Policy on the PushT environment.
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script demonstrates how to train Diffusion Policy on the PushT environment.
Once you have trained a model with this script, you can try to evaluate it on
examples/2_evaluate_pretrained_policy.py
@@ -8,72 +22,99 @@
import torch
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
-from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
-
-# Create a directory to store the training checkpoint.
-output_directory = Path("outputs/train/example_pusht_diffusion")
-output_directory.mkdir(parents=True, exist_ok=True)
-
-# Number of offline training steps (we'll only do offline training for this example.)
-# Adjust as you prefer. 5000 steps are needed to get something worth evaluating.
-training_steps = 5000
-device = torch.device("cuda")
-log_freq = 250
-
-# Set up the dataset.
-delta_timestamps = {
- # Load the previous image and state at -0.1 seconds before current frame,
- # then load current image and state corresponding to 0.0 second.
- "observation.image": [-0.1, 0.0],
- "observation.state": [-0.1, 0.0],
- # Load the previous action (-0.1), the next action to be executed (0.0),
- # and 14 future actions with a 0.1 seconds spacing. All these actions will be
- # used to supervise the policy.
- "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
-}
-dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
-
-# Set up the the policy.
-# Policies are initialized with a configuration class, in this case `DiffusionConfig`.
-# For this example, no arguments need to be passed because the defaults are set up for PushT.
-# If you're doing something different, you will likely need to change at least some of the defaults.
-cfg = DiffusionConfig()
-policy = DiffusionPolicy(cfg, dataset_stats=dataset.meta.stats)
-policy.train()
-policy.to(device)
-
-optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
-
-# Create dataloader for offline training.
-dataloader = torch.utils.data.DataLoader(
- dataset,
- num_workers=4,
- batch_size=64,
- shuffle=True,
- pin_memory=device != torch.device("cpu"),
- drop_last=True,
-)
-
-# Run training loop.
-step = 0
-done = False
-while not done:
- for batch in dataloader:
- batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
- output_dict = policy.forward(batch)
- loss = output_dict["loss"]
- loss.backward()
- optimizer.step()
- optimizer.zero_grad()
-
- if step % log_freq == 0:
- print(f"step: {step} loss: {loss.item():.3f}")
- step += 1
- if step >= training_steps:
- done = True
- break
-
-# Save a policy checkpoint.
-policy.save_pretrained(output_directory)
+from lerobot.configs.types import FeatureType
+from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
+from lerobot.datasets.utils import dataset_to_policy_features
+from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
+from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
+
+
+def main():
+ # Create a directory to store the training checkpoint.
+ output_directory = Path("outputs/train/example_pusht_diffusion")
+ output_directory.mkdir(parents=True, exist_ok=True)
+
+    # Select your device
+ device = torch.device("cuda")
+
+ # Number of offline training steps (we'll only do offline training for this example.)
+ # Adjust as you prefer. 5000 steps are needed to get something worth evaluating.
+ training_steps = 5000
+ log_freq = 1
+
+ # When starting from scratch (i.e. not from a pretrained policy), we need to specify 2 things before
+ # creating the policy:
+ # - input/output shapes: to properly size the policy
+ # - dataset stats: for normalization and denormalization of input/outputs
+ dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
+ features = dataset_to_policy_features(dataset_metadata.features)
+ output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
+ input_features = {key: ft for key, ft in features.items() if key not in output_features}
+
+ # Policies are initialized with a configuration class, in this case `DiffusionConfig`. For this example,
+ # we'll just use the defaults and so no arguments other than input/output features need to be passed.
+ cfg = DiffusionConfig(input_features=input_features, output_features=output_features)
+
+ # We can now instantiate our policy with this config and the dataset stats.
+ policy = DiffusionPolicy(cfg, dataset_stats=dataset_metadata.stats)
+ policy.train()
+ policy.to(device)
+
+    # Another policy-dataset interaction is with the delta_timestamps. Each policy expects a given number of frames
+ # which can differ for inputs, outputs and rewards (if there are some).
+ delta_timestamps = {
+ "observation.image": [i / dataset_metadata.fps for i in cfg.observation_delta_indices],
+ "observation.state": [i / dataset_metadata.fps for i in cfg.observation_delta_indices],
+ "action": [i / dataset_metadata.fps for i in cfg.action_delta_indices],
+ }
+
+ # In this case with the standard configuration for Diffusion Policy, it is equivalent to this:
+ delta_timestamps = {
+ # Load the previous image and state at -0.1 seconds before current frame,
+ # then load current image and state corresponding to 0.0 second.
+ "observation.image": [-0.1, 0.0],
+ "observation.state": [-0.1, 0.0],
+ # Load the previous action (-0.1), the next action to be executed (0.0),
+ # and 14 future actions with a 0.1 seconds spacing. All these actions will be
+ # used to supervise the policy.
+ "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
+ }
+
+ # We can then instantiate the dataset with these delta_timestamps configuration.
+ dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
+
+ # Then we create our optimizer and dataloader for offline training.
+ optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
+ dataloader = torch.utils.data.DataLoader(
+ dataset,
+ num_workers=4,
+ batch_size=64,
+ shuffle=True,
+ pin_memory=device.type != "cpu",
+ drop_last=True,
+ )
+
+ # Run training loop.
+ step = 0
+ done = False
+ while not done:
+ for batch in dataloader:
+ batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
+ loss, _ = policy.forward(batch)
+ loss.backward()
+ optimizer.step()
+ optimizer.zero_grad()
+
+ if step % log_freq == 0:
+ print(f"step: {step} loss: {loss.item():.3f}")
+ step += 1
+ if step >= training_steps:
+ done = True
+ break
+
+ # Save a policy checkpoint.
+ policy.save_pretrained(output_directory)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md
index 2e443d57bd..d6cd6cc23a 100644
--- a/examples/4_train_policy_with_script.md
+++ b/examples/4_train_policy_with_script.md
@@ -1,178 +1,241 @@
-This tutorial will explain the training script, how to use it, and particularly the use of Hydra to configure everything needed for the training run.
+This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run.
+
+> **Note:** The following assumes you're running these commands on a machine equipped with a CUDA GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (or `--policy.device=mps`, respectively). However, be advised that the code executes much slower on CPU.
## The training script
-LeRobot offers a training script at [`lerobot/scripts/train.py`](../../lerobot/scripts/train.py). At a high level it does the following:
+LeRobot offers a training script at [`lerobot/scripts/train.py`](../src/lerobot/scripts/train.py). At a high level it does the following:
-- Loads a Hydra configuration file for the following steps (more on Hydra in a moment).
-- Makes a simulation environment.
-- Makes a dataset corresponding to that simulation environment.
-- Makes a policy.
+- Initializes/loads a configuration for the following steps.
+- Instantiates a dataset.
+- (Optional) Instantiates a simulation environment corresponding to that dataset.
+- Instantiates a policy.
- Runs a standard training loop with forward pass, backward pass, optimization step, and occasional logging, evaluation (of the policy on the environment), and checkpointing.
-## Basics of how we use Hydra
-
-Explaining the ins and outs of [Hydra](https://hydra.cc/docs/intro/) is beyond the scope of this document, but here we'll share the main points you need to know.
+## Overview of the configuration system
-First, `lerobot/configs` has a directory structure like this:
+In the training script, the main function `train` expects a `TrainPipelineConfig` object:
+
+```python
+# train.py
+@parser.wrap()
+def train(cfg: TrainPipelineConfig):
```
-.
-├── default.yaml
-├── env
-│ ├── aloha.yaml
-│ ├── pusht.yaml
-│ └── xarm.yaml
-└── policy
- ├── act.yaml
- ├── diffusion.yaml
- └── tdmpc.yaml
-```
+
+
+You can inspect the `TrainPipelineConfig` defined in [`lerobot/configs/train.py`](../src/lerobot/configs/train.py) (which is heavily commented and meant to be a reference for understanding any option).
-**_For brevity, in the rest of this document we'll drop the leading `lerobot/configs` path. So `default.yaml` really refers to `lerobot/configs/default.yaml`._**
+When running the script, command-line inputs are parsed thanks to the `@parser.wrap()` decorator, and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus), a tool dedicated to this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and override their values through command-line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization and typing, and lets you manipulate configurations as objects directly in the code rather than as dictionaries or namespaces (which enables nice IDE features such as autocomplete, jump-to-def, etc.).
-When you run the training script with
+Let's have a look at a simplified example. Among others, the training config has the following attributes:
+
```python
-python lerobot/scripts/train.py
+@dataclass
+class TrainPipelineConfig:
+ dataset: DatasetConfig
+ env: envs.EnvConfig | None = None
+ policy: PreTrainedConfig | None = None
```
+
-Hydra is set up to read `default.yaml` (via the `@hydra.main` decorator). If you take a look at the `@hydra.main`'s arguments you will see `config_path="../configs", config_name="default"`. At the top of `default.yaml`, is a `defaults` section which looks likes this:
+in which `DatasetConfig` for example is defined as such:
-```yaml
-defaults:
- - _self_
- - env: pusht
- - policy: diffusion
+
+```python
+@dataclass
+class DatasetConfig:
+ repo_id: str
+ episodes: list[int] | None = None
+ video_backend: str = "pyav"
```
+
+
+This creates a hierarchical relationship where, for example assuming we have a `cfg` instance of `TrainPipelineConfig`, we can access the `repo_id` value with `cfg.dataset.repo_id`.
+From the command line, we can specify this value by using a very similar syntax `--dataset.repo_id=repo/id`.
-This logic tells Hydra to incorporate configuration parameters from `env/pusht.yaml` and `policy/diffusion.yaml`. _Note: Be aware of the order as any configuration parameters with the same name will be overidden. Thus, `default.yaml` is overridden by `env/pusht.yaml` which is overidden by `policy/diffusion.yaml`_.
+By default, every field takes its default value specified in the dataclass. If a field doesn't have a default value, it needs to be specified either from the command line or from a config file – which path is also given in the command line (more in this below). In the example above, the `dataset` field doesn't have a default value which means it must be specified.
-Then, `default.yaml` also contains common configuration parameters such as `device: cuda` or `use_amp: false` (for enabling fp16 training). Some other parameters are set to `???` which indicates that they are expected to be set in additional yaml files. For instance, `training.offline_steps: ???` in `default.yaml` is set to `200000` in `diffusion.yaml`.
+## Specifying values from the CLI
-Thanks to this `defaults` section in `default.yaml`, if you want to train Diffusion Policy with PushT, you really only need to run:
+Let's say that we want to train [Diffusion Policy](../src/lerobot/policies/diffusion) on the [pusht](https://huggingface.co/datasets/lerobot/pusht) dataset, using the [gym_pusht](https://github.com/huggingface/gym-pusht) environment for evaluation. The command to do so would look like this:
```bash
-python lerobot/scripts/train.py
+python -m lerobot.scripts.train \
+ --dataset.repo_id=lerobot/pusht \
+ --policy.type=diffusion \
+ --env.type=pusht
```
-However, you can be more explicit and launch the exact same Diffusion Policy training on PushT with:
+Let's break this down:
-```bash
-python lerobot/scripts/train.py policy=diffusion env=pusht
-```
+- To specify the dataset, we just need to specify its `repo_id` on the hub which is the only required argument in the `DatasetConfig`. The rest of the fields have default values and in this case we are fine with those so we can just add the option `--dataset.repo_id=lerobot/pusht`.
+- To specify the policy, we can just select the diffusion policy using `--policy` appended with `.type`. Here, `.type` is a special argument which allows us to select config classes inheriting from `draccus.ChoiceRegistry` that have been decorated with the `register_subclass()` method. For a better explanation of this feature, have a look at this [Draccus demo](https://github.com/dlwh/draccus?tab=readme-ov-file#more-flexible-configuration-with-choice-types). In our code, we use this mechanism mainly to select policies, environments, robots, and some other components like optimizers. The policies available to select are located in [lerobot/policies](../src/lerobot/policies).
+- Similarly, we select the environment with `--env.type=pusht`. The different environment configs are available in [`lerobot/envs/configs.py`](../src/lerobot/envs/configs.py).
-This way of overriding defaults via the CLI is especially useful when you want to change the policy and/or environment. For instance, you can train ACT on the default Aloha environment with:
+Let's see another example. Let's say you've been training [ACT](../src/lerobot/policies/act) on [lerobot/aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) using the [gym-aloha](https://github.com/huggingface/gym-aloha) environment for evaluation with:
```bash
-python lerobot/scripts/train.py policy=act env=aloha
+python -m lerobot.scripts.train \
+ --policy.type=act \
+ --dataset.repo_id=lerobot/aloha_sim_insertion_human \
+ --env.type=aloha \
+ --output_dir=outputs/train/act_aloha_insertion
```
-There are two things to note here:
-- Config overrides are passed as `param_name=param_value`.
-- Here we have overridden the defaults section. `policy=act` tells Hydra to use `policy/act.yaml`, and `env=aloha` tells Hydra to use `env/aloha.yaml`.
-
-_As an aside: we've set up all of our configurations so that they reproduce state-of-the-art results from papers in the literature._
+> Notice we added `--output_dir` to explicitly tell where to write outputs from this run (checkpoints, training state, configs etc.). This is not mandatory and if you don't specify it, a default directory will be created from the current date and time, env.type and policy.type. This will typically look like `outputs/train/2025-01-24/16-10-05_aloha_act`.
-## Overriding configuration parameters in the CLI
+We now want to train a different policy for aloha on another task. We'll change the dataset and use [lerobot/aloha_sim_transfer_cube_human](https://huggingface.co/datasets/lerobot/aloha_sim_transfer_cube_human) instead. Of course, we also need to change the environment's task to match this other task.
+Looking at the [`AlohaEnv`](../src/lerobot/envs/configs.py) config, the task is `"AlohaInsertion-v0"` by default, which corresponds to the task we trained on in the command above. The [gym-aloha](https://github.com/huggingface/gym-aloha?tab=readme-ov-file#description) environment also has the `AlohaTransferCube-v0` task which corresponds to this other task we want to train on. Putting this together, we can train this new policy on this different task using:
-Now let's say that we want to train on a different task in the Aloha environment. If you look in `env/aloha.yaml` you will see something like:
+```bash
+python -m lerobot.scripts.train \
+ --policy.type=act \
+ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
+ --env.type=aloha \
+ --env.task=AlohaTransferCube-v0 \
+ --output_dir=outputs/train/act_aloha_transfer
+```
-```yaml
-# lerobot/configs/env/aloha.yaml
-env:
- task: AlohaInsertion-v0
+## Loading from a config file
+
+Now, let's assume that we want to reproduce the run just above. That run has produced a `train_config.json` file in its checkpoints, which serializes the `TrainPipelineConfig` instance it used:
+
+```json
+{
+ "dataset": {
+ "repo_id": "lerobot/aloha_sim_transfer_cube_human",
+ "episodes": null,
+ ...
+ },
+ "env": {
+ "type": "aloha",
+ "task": "AlohaTransferCube-v0",
+ "fps": 50,
+ ...
+ },
+ "policy": {
+ "type": "act",
+ "n_obs_steps": 1,
+ ...
+ },
+ ...
+}
```
-And if you look in `policy/act.yaml` you will see something like:
+We can then simply load the config values from this file using:
-```yaml
-# lerobot/configs/policy/act.yaml
-dataset_repo_id: lerobot/aloha_sim_insertion_human
+```bash
+python -m lerobot.scripts.train \
+ --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \
+ --output_dir=outputs/train/act_aloha_transfer_2
```
-But our Aloha environment actually supports a cube transfer task as well. To train for this task, you could manually modify the two yaml configuration files respectively.
+`--config_path` is also a special argument which allows you to initialize the config from a local config file. It can point to a directory that contains `train_config.json` or directly to the config file itself.
-First, we'd need to switch to using the cube transfer task for the ALOHA environment.
+Similarly to Hydra, we can still override some parameters in the CLI if we want to, e.g.:
-```diff
-# lerobot/configs/env/aloha.yaml
-env:
-- task: AlohaInsertion-v0
-+ task: AlohaTransferCube-v0
+```bash
+python -m lerobot.scripts.train \
+ --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \
+    --output_dir=outputs/train/act_aloha_transfer_2 \
+ --policy.n_action_steps=80
```
-Then, we'd also need to switch to using the cube transfer dataset.
+> Note: While `--output_dir` is not required in general, in this case we need to specify it since it will otherwise take the value from the `train_config.json` (which is `outputs/train/act_aloha_transfer`). In order to prevent accidental deletion of previous run checkpoints, we raise an error if you're trying to write in an existing directory. This is not the case when resuming a run, which is what you'll learn next.
+
+`--config_path` can also accept the repo_id of a repo on the hub that contains a `train_config.json` file, e.g. running:
-```diff
-# lerobot/configs/policy/act.yaml
--dataset_repo_id: lerobot/aloha_sim_insertion_human
-+dataset_repo_id: lerobot/aloha_sim_transfer_cube_human
+```bash
+python -m lerobot.scripts.train --config_path=lerobot/diffusion_pusht
```
-Then, you'd be able to run:
+will start a training run with the same configuration used for training [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)
+
+## Resume training
+
+Being able to resume a training run is important in case it crashes or is aborted for any reason. We'll demonstrate how to do that here.
+
+Let's reuse the command from the previous run and add a few more options:
```bash
-python lerobot/scripts/train.py policy=act env=aloha
+python -m lerobot.scripts.train \
+ --policy.type=act \
+ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
+ --env.type=aloha \
+ --env.task=AlohaTransferCube-v0 \
+ --log_freq=25 \
+ --save_freq=100 \
+ --output_dir=outputs/train/run_resumption
```
-and you'd be training and evaluating on the cube transfer task.
+Here we've taken care to set the log frequency and checkpointing frequency to low numbers so we can showcase resumption. You should be able to see some logging and have a first checkpoint within 1 minute (depending on hardware). Wait for the first checkpoint to happen; you should see a line like this in your terminal:
+
+```
+INFO 2025-01-24 16:10:56 ts/train.py:263 Checkpoint policy after step 100
+```
-An alternative approach to editing the yaml configuration files, would be to override the defaults via the command line:
+Now let's simulate a crash by killing the process (hit `ctrl`+`c`). We can then simply resume this run from the last checkpoint available with:
```bash
-python lerobot/scripts/train.py \
- policy=act \
- dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
- env=aloha \
- env.task=AlohaTransferCube-v0
+python -m lerobot.scripts.train \
+ --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \
+ --resume=true
```
-There's something new here. Notice the `.` delimiter used to traverse the configuration hierarchy. _But be aware that the `defaults` section is an exception. As you saw above, we didn't need to write `defaults.policy=act` in the CLI. `policy=act` was enough._
+You should see from the logging that your training picks up from where it left off.
-Putting all that knowledge together, here's the command that was used to train https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human.
+Another reason you might want to resume a run is simply to extend training and add more training steps. The number of training steps is set by the option `--steps`, which is 100 000 by default.
+You could double the number of steps of the previous run with:
```bash
-python lerobot/scripts/train.py \
- hydra.run.dir=outputs/train/act_aloha_sim_transfer_cube_human \
- device=cuda
- env=aloha \
- env.task=AlohaTransferCube-v0 \
- dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
- policy=act \
- training.eval_freq=10000 \
- training.log_freq=250 \
- training.offline_steps=100000 \
- training.save_model=true \
- training.save_freq=25000 \
- eval.n_episodes=50 \
- eval.batch_size=50 \
- wandb.enable=false \
+python -m lerobot.scripts.train \
+ --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \
+ --resume=true \
+ --steps=200000
```
-There's one new thing here: `hydra.run.dir=outputs/train/act_aloha_sim_transfer_cube_human`, which specifies where to save the training output.
-
-## Using a configuration file not in `lerobot/configs`
+## Outputs of a run
-Above we discusses the our training script is set up such that Hydra looks for `default.yaml` in `lerobot/configs`. But, if you have a configuration file elsewhere in your filesystem you may use:
+In the output directory, there will be a folder called `checkpoints` with the following structure:
```bash
-python lerobot/scripts/train.py --config-dir PARENT/PATH --config-name FILE_NAME_WITHOUT_EXTENSION
+outputs/train/run_resumption/checkpoints
+├── 000100 # checkpoint_dir for training step 100
+│ ├── pretrained_model/
+│ │ ├── config.json # policy config
+│ │ ├── model.safetensors # policy weights
+│ │ └── train_config.json # train config
+│ └── training_state/
+│ ├── optimizer_param_groups.json # optimizer param groups
+│ ├── optimizer_state.safetensors # optimizer state
+│ ├── rng_state.safetensors # rng states
+│ ├── scheduler_state.json # scheduler state
+│ └── training_step.json # training step
+├── 000200
+└── last -> 000200 # symlink to the last available checkpoint
```
-Note: here we use regular syntax for providing CLI arguments to a Python script, not Hydra's `param_name=param_value` syntax.
+## Fine-tuning a pre-trained policy
-As a concrete example, this becomes particularly handy when you have a folder with training outputs, and would like to re-run the training. For example, say you previously ran the training script with one of the earlier commands and have `outputs/train/my_experiment/checkpoints/pretrained_model/config.yaml`. This `config.yaml` file will have the full set of configuration parameters within it. To run the training with the same configuration again, do:
+In addition to the features currently in Draccus, we've added a special `.path` argument for the policy, which allows you to load a policy as you would with `PreTrainedPolicy.from_pretrained()`. In that case, `path` can be a local directory that contains a checkpoint or a repo_id pointing to a pretrained policy on the hub.
+
+For example, we could fine-tune a [policy pre-trained on the aloha transfer task](https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human) on the aloha insertion task. We can achieve this with:
```bash
-python lerobot/scripts/train.py --config-dir outputs/train/my_experiment/checkpoints/last/pretrained_model --config-name config
+python -m lerobot.scripts.train \
+ --policy.path=lerobot/act_aloha_sim_transfer_cube_human \
+ --dataset.repo_id=lerobot/aloha_sim_insertion_human \
+ --env.type=aloha \
+ --env.task=AlohaInsertion-v0
```
-Note that you may still use the regular syntax for config parameter overrides (eg: by adding `training.offline_steps=200000`).
+When doing so, keep in mind that the features of the fine-tuning dataset must match the input/output features of the pretrained policy.
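+
+For example, a quick illustrative check (assuming the ACT policy class is importable from `lerobot.policies.act.modeling_act`, and using the `dataset_to_policy_features` helper from `lerobot.datasets.utils`) could look like this:
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
+from lerobot.datasets.utils import dataset_to_policy_features
+from lerobot.policies.act.modeling_act import ACTPolicy
+
+policy = ACTPolicy.from_pretrained("lerobot/act_aloha_sim_transfer_cube_human")
+meta = LeRobotDatasetMetadata("lerobot/aloha_sim_insertion_human")
+dataset_features = dataset_to_policy_features(meta.features)
+
+# Every feature the policy expects as input or output should also exist in the dataset.
+expected = {**policy.config.input_features, **policy.config.output_features}
+missing = [key for key in expected if key not in dataset_features]
+print("features missing from the dataset:", missing)
+```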
## Typical logs and metrics
-When you start the training process, you will first see your full configuration being printed in the terminal. You can check it to make sure that you config it correctly and your config is not overrided by other files. The final configuration will also be saved with the checkpoint.
+When you start the training process, you will first see your full configuration being printed in the terminal. You can check it to make sure that you configured your run correctly. The final configuration will also be saved with the checkpoint.
After that, you will see training log like this one:
@@ -180,7 +243,7 @@ After that, you will see training log like this one:
INFO 2024-08-14 13:35:12 ts/train.py:192 step:0 smpl:64 ep:1 epch:0.00 loss:1.112 grdn:15.387 lr:2.0e-07 updt_s:1.738 data_s:4.774
```
-or evaluation log like:
+or evaluation log:
```
INFO 2024-08-14 13:38:45 ts/train.py:226 step:100 smpl:6K ep:52 epch:0.25 ∑rwrd:20.693 success:0.0% eval_s:120.266
@@ -200,14 +263,49 @@ These logs will also be saved in wandb if `wandb.enable` is set to `true`. Here
Some metrics are useful for initial performance profiling. For example, if you find the current GPU utilization is low via the `nvidia-smi` command and `data_s` sometimes is too high, you may need to modify batch size or number of dataloading workers to accelerate dataloading. We also recommend [pytorch profiler](https://github.com/huggingface/lerobot?tab=readme-ov-file#improve-your-code-with-profiling) for detailed performance probing.
----
+## In short
+
+We'll summarize here the main use cases to remember from this tutorial.
+
+#### Train a policy from scratch – CLI
+
+```bash
+python -m lerobot.scripts.train \
+ --policy.type=act \ # <- select 'act' policy
+ --env.type=pusht \ # <- select 'pusht' environment
+ --dataset.repo_id=lerobot/pusht # <- train on this dataset
+```
-So far we've seen how to train Diffusion Policy for PushT and ACT for ALOHA. Now, what if we want to train ACT for PushT? Well, there are aspects of the ACT configuration that are specific to the ALOHA environments, and these happen to be incompatible with PushT. Therefore, trying to run the following will almost certainly raise an exception of sorts (eg: feature dimension mismatch):
+#### Train a policy from scratch - config file + CLI
```bash
-python lerobot/scripts/train.py policy=act env=pusht dataset_repo_id=lerobot/pusht
+python -m lerobot.scripts.train \
+ --config_path=path/to/pretrained_model \ # <- can also be a repo_id
+ --policy.n_action_steps=80 # <- you may still override values
```
-Please, head on over to our [advanced tutorial on adapting policy configuration to various environments](./advanced/train_act_pusht/train_act_pusht.md) to learn more.
+#### Resume/continue a training run
+
+```bash
+python -m lerobot.scripts.train \
+ --config_path=checkpoint/pretrained_model/ \
+ --resume=true \
+ --steps=200000 # <- you can change some training parameters
+```
+
+#### Fine-tuning
+
+```bash
+python -m lerobot.scripts.train \
+ --policy.path=lerobot/act_aloha_sim_transfer_cube_human \ # <- can also be a local path to a checkpoint
+ --dataset.repo_id=lerobot/aloha_sim_insertion_human \
+ --env.type=aloha \
+ --env.task=AlohaInsertion-v0
+```
+
+---
+
+Now that you know the basics of how to train a policy, you might want to know how to apply this knowledge to actual robots, or how to record your own datasets and train policies on your specific task.
+If that's the case, head over to the next tutorial [`7_get_started_with_real_robot.md`](./7_get_started_with_real_robot.md).
-Or in the meantime, happy coding! 🤗
+Or in the meantime, happy training! 🤗
diff --git a/examples/5_resume_training.md b/examples/5_resume_training.md
deleted file mode 100644
index 6e8d684dae..0000000000
--- a/examples/5_resume_training.md
+++ /dev/null
@@ -1,37 +0,0 @@
-This tutorial explains how to resume a training run that you've started with the training script. If you don't know how our training script and configuration system works, please read [4_train_policy_with_script.md](./4_train_policy_with_script.md) first.
-
-## Basic training resumption
-
-Let's consider the example of training ACT for one of the ALOHA tasks. Here's a command that can achieve that:
-
-```bash
-python lerobot/scripts/train.py \
- hydra.run.dir=outputs/train/run_resumption \
- policy=act \
- dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
- env=aloha \
- env.task=AlohaTransferCube-v0 \
- training.log_freq=25 \
- training.save_checkpoint=true \
- training.save_freq=100
-```
-
-Here we're using the default dataset and environment for ACT, and we've taken care to set up the log frequency and checkpointing frequency to low numbers so we can test resumption. You should be able to see some logging and have a first checkpoint within 1 minute. Please interrupt the training after the first checkpoint.
-
-To resume, all that we have to do is run the training script, providing the run directory, and the resume option:
-
-```bash
-python lerobot/scripts/train.py \
- hydra.run.dir=outputs/train/run_resumption \
- resume=true
-```
-
-You should see from the logging that your training picks up from where it left off.
-
-Note that with `resume=true`, the configuration file from the last checkpoint in the training output directory is loaded. So it doesn't matter that we haven't provided all the other configuration parameters from our previous command (although there may be warnings to notify you that your command has a different configuration than the checkpoint).
-
----
-
-Now you should know how to resume your training run in case it gets interrupted or you want to extend a finished training run.
-
-Happy coding! 🤗
diff --git a/examples/6_add_image_transforms.py b/examples/6_add_image_transforms.py
deleted file mode 100644
index 882710e3d9..0000000000
--- a/examples/6_add_image_transforms.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-This script demonstrates how to use torchvision's image transformations with LeRobotDataset for data
-augmentation purposes. The transformations are passed to the dataset as an argument upon creation, and
-transforms are applied to the observation images before they are returned in the dataset's __getitem__.
-"""
-
-from pathlib import Path
-
-from torchvision.transforms import ToPILImage, v2
-
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
-
-dataset_repo_id = "lerobot/aloha_static_screw_driver"
-
-# Create a LeRobotDataset with no transformations
-dataset = LeRobotDataset(dataset_repo_id, episodes=[0])
-# This is equivalent to `dataset = LeRobotDataset(dataset_repo_id, image_transforms=None)`
-
-# Get the index of the first observation in the first episode
-first_idx = dataset.episode_data_index["from"][0].item()
-
-# Get the frame corresponding to the first camera
-frame = dataset[first_idx][dataset.meta.camera_keys[0]]
-
-
-# Define the transformations
-transforms = v2.Compose(
- [
- v2.ColorJitter(brightness=(0.5, 1.5)),
- v2.ColorJitter(contrast=(0.5, 1.5)),
- v2.ColorJitter(hue=(-0.1, 0.1)),
- v2.RandomAdjustSharpness(sharpness_factor=2, p=1),
- ]
-)
-
-# Create another LeRobotDataset with the defined transformations
-transformed_dataset = LeRobotDataset(dataset_repo_id, episodes=[0], image_transforms=transforms)
-
-# Get a frame from the transformed dataset
-transformed_frame = transformed_dataset[first_idx][transformed_dataset.meta.camera_keys[0]]
-
-# Create a directory to store output images
-output_dir = Path("outputs/image_transforms")
-output_dir.mkdir(parents=True, exist_ok=True)
-
-# Save the original frame
-to_pil = ToPILImage()
-to_pil(frame).save(output_dir / "original_frame.png", quality=100)
-print(f"Original frame saved to {output_dir / 'original_frame.png'}.")
-
-# Save the transformed frame
-to_pil(transformed_frame).save(output_dir / "transformed_frame.png", quality=100)
-print(f"Transformed frame saved to {output_dir / 'transformed_frame.png'}.")
diff --git a/examples/7_get_started_with_real_robot.md b/examples/7_get_started_with_real_robot.md
deleted file mode 100644
index 4c7bfbd115..0000000000
--- a/examples/7_get_started_with_real_robot.md
+++ /dev/null
@@ -1,1042 +0,0 @@
-# Getting Started with Real-World Robots
-
-This tutorial will guide you through the process of setting up and training a neural network to autonomously control a real robot.
-
-**What You'll Learn:**
-1. How to order and assemble your robot.
-2. How to connect, configure, and calibrate your robot.
-3. How to record and visualize your dataset.
-4. How to train a policy using your data and prepare it for evaluation.
-5. How to evaluate your policy and visualize the results.
-
-By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934).
-
-This tutorial is specifically made for the affordable [Koch v1.1](https://github.com/jess-moss/koch-v1-1) robot, but it contains additional information so it can easily be adapted to other types of robots, like the [Aloha bimanual robot](https://aloha-2.github.io), by changing some configurations. The Koch v1.1 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.
-
-During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously.
-
-If you encounter any issues at any step of the tutorial, feel free to seek help on [Discord](https://discord.com/invite/s3KuuzsPFb) or don't hesitate to iterate with us on the tutorial by creating issues or pull requests. Thanks!
-
-## 1. Order and Assemble your Koch v1.1
-
-Follow the sourcing and assembling instructions provided on the [Koch v1.1 Github page](https://github.com/jess-moss/koch-v1-1). This will guide you through setting up both the follower and leader arms, as shown in the image below.
-
-
-
-
-
-For a visual walkthrough of the assembly process, you can refer to [this video tutorial](https://youtu.be/8nQIg9BwwTk).
-
-## 2. Configure motors, calibrate arms, teleoperate your Koch v1.1
-
-First, install the additional dependencies required for robots built with dynamixel motors like Koch v1.1 by running one of the following commands (make sure gcc is installed).
-
-Using `pip`:
-```bash
-pip install -e ".[dynamixel]"
-```
-
-Or using `poetry`:
-```bash
-poetry install --sync --extras "dynamixel"
-```
-
-/!\ For Linux only, ffmpeg and opencv require a conda install for now. Run this exact sequence of commands:
-```bash
-conda install -c conda-forge ffmpeg
-pip uninstall opencv-python
-conda install -c conda-forge "opencv>=4.10.0"
-```
-
-You are now ready to plug the 5V power supply into the motor bus of the leader arm (the smaller one), since all its motors only require 5V.
-
-Then plug the 12V power supply into the motor bus of the follower arm. It has two motors that need 12V, and the rest will be powered with 5V through the voltage converter.
-
-Finally, connect both arms to your computer via USB. Note that the USB doesn't provide any power, and both arms need to be plugged in with their associated power supply to be detected by your computer.
-
-Now you are ready to configure your motors for the first time, as detailed in the sections below. In the upcoming sections, you'll learn about our classes and functions by running some python code in an interactive session, or by copy-pasting it in a python file.
-
-If you have already configured your motors once before, you can streamline the process by directly running the teleoperate script (detailed further in this tutorial):
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/koch.yaml \
- --robot-overrides '~cameras' # do not instantiate the cameras
-```
-
-It will automatically:
-1. Identify any missing calibrations and initiate the calibration procedure.
-2. Connect the robot and start teleoperation.
-
-### a. Control your motors with DynamixelMotorsBus
-
-You can use the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py) to communicate with the motors connected as a chain to the corresponding USB bus. This class leverages the Python [Dynamixel SDK](https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20) to facilitate reading from and writing to the motors.
-
-**First Configuration of your motors**
-
-You will need to unplug each motor in turn and run a command to identify the motor. The motor will save its own identification, so you only need to do this once. Start by unplugging all of the motors.
-
-Do the Leader arm first, as all of its motors are of the same type. Plug in your first motor on your leader arm and run this script to set its ID to 1.
-```bash
-python lerobot/scripts/configure_motor.py \
- --port /dev/tty.usbmodem58760432961 \
- --brand dynamixel \
- --model xl330-m288 \
- --baudrate 1000000 \
- --ID 1
-```
-
-Then unplug your first motor, plug in the second motor, and set its ID to 2.
-```bash
-python lerobot/scripts/configure_motor.py \
- --port /dev/tty.usbmodem58760432961 \
- --brand dynamixel \
- --model xl330-m288 \
- --baudrate 1000000 \
- --ID 2
-```
-
-Repeat the process for all your motors up to ID 6.
-
-The process for the follower arm is almost the same, but the follower arm has two types of motors. For the first two motors, make sure you set the model to `xl430-w250`. _Important: configuring follower motors requires plugging and unplugging power. Make sure you use the 5V power for the XL330s and the 12V power for the XL430s!_
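-
-For example, the command for the first follower motor could look like this (a sketch adapted from the leader command above, assuming the same USB port; adjust `--port` to your setup):
-```bash
-python lerobot/scripts/configure_motor.py \
-  --port /dev/tty.usbmodem58760432961 \
-  --brand dynamixel \
-  --model xl430-w250 \
-  --baudrate 1000000 \
-  --ID 1
-```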
-
-After all of your motors are configured properly, you're ready to plug them all together in a daisy-chain as shown in the original video.
-
-**Instantiate the DynamixelMotorsBus**
-
-To begin, create two instances of the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py), one for each arm, using their corresponding USB ports (e.g. `DynamixelMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
-
-To find the correct ports for each arm, run the utility script twice:
-```bash
-python lerobot/scripts/find_motors_bus_port.py
-```
-
-Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
-```
-Finding all available ports for the MotorBus.
-['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
-
-[...Disconnect leader arm and press Enter...]
-
-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
-Reconnect the usb cable.
-```
-
-Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
-```
-Finding all available ports for the MotorBus.
-['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
-
-[...Disconnect follower arm and press Enter...]
-
-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
-Reconnect the usb cable.
-```
-
-Troubleshooting: On Linux, you might need to give access to the USB ports by running:
-```bash
-sudo chmod 666 /dev/ttyACM0
-sudo chmod 666 /dev/ttyACM1
-```
-
-*Listing and Configuring Motors*
-
-Next, you'll need to list the motors for each arm, including their name, index, and model. Initially, each motor is assigned the factory default index `1`. Since each motor requires a unique index to function correctly when connected in a chain on a common bus, you'll need to assign different indices. It's recommended to use an ascending index order, starting from `1` (e.g., `1, 2, 3, 4, 5, 6`). These indices will be saved in the persistent memory of each motor during the first connection.
-
-To assign indices to the motors, run this code in an interactive Python session. Replace the `port` values with the ones you identified earlier:
-```python
-from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus
-
-leader_port = "/dev/tty.usbmodem575E0031751"
-follower_port = "/dev/tty.usbmodem575E0032081"
-
-leader_arm = DynamixelMotorsBus(
- port=leader_port,
- motors={
- # name: (index, model)
- "shoulder_pan": (1, "xl330-m077"),
- "shoulder_lift": (2, "xl330-m077"),
- "elbow_flex": (3, "xl330-m077"),
- "wrist_flex": (4, "xl330-m077"),
- "wrist_roll": (5, "xl330-m077"),
- "gripper": (6, "xl330-m077"),
- },
-)
-
-follower_arm = DynamixelMotorsBus(
- port=follower_port,
- motors={
- # name: (index, model)
- "shoulder_pan": (1, "xl430-w250"),
- "shoulder_lift": (2, "xl430-w250"),
- "elbow_flex": (3, "xl330-m288"),
- "wrist_flex": (4, "xl330-m288"),
- "wrist_roll": (5, "xl330-m288"),
- "gripper": (6, "xl330-m288"),
- },
-)
-```
-
-*Updating the YAML Configuration File*
-
-Next, update the port values in the YAML configuration file for the Koch robot at [`lerobot/configs/robot/koch.yaml`](../lerobot/configs/robot/koch.yaml) with the ports you've identified:
-```yaml
-[...]
-robot_type: koch
-leader_arms:
- main:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/tty.usbmodem575E0031751 # <- Update
- motors:
- # name: (index, model)
- shoulder_pan: [1, "xl330-m077"]
- shoulder_lift: [2, "xl330-m077"]
- elbow_flex: [3, "xl330-m077"]
- wrist_flex: [4, "xl330-m077"]
- wrist_roll: [5, "xl330-m077"]
- gripper: [6, "xl330-m077"]
-follower_arms:
- main:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/tty.usbmodem575E0032081 # <- Update
- motors:
- # name: (index, model)
- shoulder_pan: [1, "xl430-w250"]
- shoulder_lift: [2, "xl430-w250"]
- elbow_flex: [3, "xl330-m288"]
- wrist_flex: [4, "xl330-m288"]
- wrist_roll: [5, "xl330-m288"]
- gripper: [6, "xl330-m288"]
-[...]
-```
-
-Don't forget to set `robot_type: aloha` if you follow this tutorial with the [Aloha bimanual robot](https://aloha-2.github.io) instead of Koch v1.1.
-
-This configuration file is used to instantiate your robot across all scripts. We'll cover how this works later on.
-
-**Connect and Configure your Motors**
-
-Before you can start using your motors, you'll need to configure them to ensure proper communication. When you first connect the motors, the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py) automatically detects any mismatch between the current motor indices (factory set to `1`) and the specified indices (e.g., `1, 2, 3, 4, 5, 6`). This triggers a configuration procedure that requires you to unplug the power cord and motors, then reconnect each motor sequentially, starting from the one closest to the bus.
-
-For a visual guide, refer to the [video tutorial of the configuration procedure](https://youtu.be/U78QQ9wCdpY).
-
-To connect and configure the leader arm, run the following code in the same Python interactive session as earlier in the tutorial:
-```python
-leader_arm.connect()
-```
-
-When you connect the leader arm for the first time, you might see an output similar to this:
-```
-Read failed due to communication error on port /dev/tty.usbmodem575E0032081 for group_key ID_shoulder_pan_shoulder_lift_elbow_flex_wrist_flex_wrist_roll_gripper: [TxRxResult] There is no status packet!
-
-/!\ A configuration issue has been detected with your motors:
-If this is the first time you are using these motors, press enter to configure your motors... but before verify that all the cables are connected the proper way. If you find an issue, before making a modification, kill the python process, unplug the power cord to not damage the motors, rewire correctly, then plug the power again and relaunch the script.
-
-Motor indices detected: {9600: [1]}
-
-1. Unplug the power cord
-2. Plug/unplug minimal number of cables to only have the first 1 motor(s) (['shoulder_pan']) connected.
-3. Re-plug the power cord
-Press Enter to continue...
-
-*Follow the procedure*
-
-Setting expected motor indices: [1, 2, 3, 4, 5, 6]
-```
-
-Once the leader arm is configured, repeat the process for the follower arm by running:
-```python
-follower_arm.connect()
-```
-
-Congratulations! Both arms are now properly configured and connected. You won't need to go through the configuration procedure again in the future.
-
-**Troubleshooting**:
-
-If the configuration process fails, you may need to configure the motors via the Dynamixel Wizard instead.
-
-Known failure modes:
-- Calling `arm.connect()` raises `OSError: No motor found, but one new motor expected. Verify power cord is plugged in and retry` on Ubuntu 22.
-
-Steps:
-1. Visit https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_wizard2/#connect-dynamixel.
-2. Follow the software installation instructions in section 3 of the web page.
-3. Launch the software.
-4. Configure the device scanning options in the menu under `Tools` > `Options` > `Scan`. Check only Protocol 2.0, select only the USB port identifier of interest, select all baudrates, set the ID range to `[0, 10]`. _While this step is not strictly necessary, it greatly speeds up scanning._
-5. For each motor in turn:
- - Disconnect the power to the driver board.
- - Connect **only** the motor of interest to the driver board, making sure to disconnect it from any other motors.
- - Reconnect the power to the driver board.
- - From the software menu select `Device` > `Scan` and let the scan run. A device should appear.
- - If the device has an asterisk (*) near it, it means the firmware is indeed outdated. From the software menu, select `Tools` > `Firmware Update`. Follow the prompts.
- The main panel should have a table with various parameters of the device (refer to the web page, section 5). Select the row with `ID`, and then set the desired ID on the bottom right panel by selecting and clicking `Save`.
- - Just like you did with the ID, also set the `Baud Rate` to 1 Mbps.
-6. Check everything has been done right:
- - Rewire the arms in their final configuration and power both of them.
- - Scan for devices. All 12 motors should appear.
- - Select the motors one by one and move the arm. Check that the graphical indicator near the top right shows the movement.
-
-**Read and Write with DynamixelMotorsBus**
-
-To get familiar with how `DynamixelMotorsBus` communicates with the motors, you can start by reading data from them. Copy-paste this code into the same interactive python session:
-```python
-leader_pos = leader_arm.read("Present_Position")
-follower_pos = follower_arm.read("Present_Position")
-print(leader_pos)
-print(follower_pos)
-```
-
-Expected output might look like:
-```
-array([2054, 523, 3071, 1831, 3049, 2441], dtype=int32)
-array([2003, 1601, 56, 2152, 3101, 2283], dtype=int32)
-```
-
-Try moving the arms to various positions and observe how the values change.
-
-Now let's try to enable torque in the follower arm by copy-pasting this code:
-```python
-from lerobot.common.robot_devices.motors.dynamixel import TorqueMode
-
-follower_arm.write("Torque_Enable", TorqueMode.ENABLED.value)
-```
-
-With torque enabled, the follower arm will be locked in its current position. Do not attempt to manually move the arm while torque is enabled, as this could damage the motors.
-
-Now, to get more familiar with reading and writing, let's move the arm programmatically by copy-pasting the following example code:
-```python
-# Get the current position
-position = follower_arm.read("Present_Position")
-
-# Update first motor (shoulder_pan) position by +10 steps
-position[0] += 10
-follower_arm.write("Goal_Position", position)
-
-# Update all motors position by -30 steps
-position -= 30
-follower_arm.write("Goal_Position", position)
-
-# Update gripper by +30 steps
-position[-1] += 30
-follower_arm.write("Goal_Position", position[-1], "gripper")
-```
-
-When you're done playing, you can try to disable the torque, but make sure you hold your robot so that it doesn't fall:
-```python
-follower_arm.write("Torque_Enable", TorqueMode.DISABLED.value)
-```
-
-Finally, disconnect the arms:
-```python
-leader_arm.disconnect()
-follower_arm.disconnect()
-```
-
-Alternatively, you can unplug the power cord, which will automatically disable torque and disconnect the motors.
-
-*/!\ Warning*: These motors tend to overheat, especially under torque or if left plugged in for too long. Unplug after use.
-
-### b. Teleoperate your Koch v1.1 with ManipulatorRobot
-
-**Instantiate the ManipulatorRobot**
-
-Before you can teleoperate your robot, you need to instantiate the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) using the previously defined `leader_arm` and `follower_arm`.
-
-For the Koch v1.1 robot, we only have one leader, so we refer to it as `"main"` and define it as `leader_arms={"main": leader_arm}`. We do the same for the follower arm. For other robots (like the Aloha), which may have two pairs of leader and follower arms, you would define them like this: `leader_arms={"left": left_leader_arm, "right": right_leader_arm},`. Same thing for the follower arms.
-
-You also need to provide a path to a calibration directory, such as `calibration_dir=".cache/calibration/koch"`. More on this in the next section.
-
-Run the following code to instantiate your manipulator robot:
-```python
-from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot
-
-robot = ManipulatorRobot(
- robot_type="koch",
- leader_arms={"main": leader_arm},
- follower_arms={"main": follower_arm},
- calibration_dir=".cache/calibration/koch",
-)
-```
-
-Setting `robot_type="koch"` selects the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40 degree angle to use it as a trigger.
-
-For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `robot_type="aloha"` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected. If you need to run manual calibration, simply update `calibration_dir` to `.cache/calibration/aloha`.
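-
-For illustration, an Aloha instantiation could look like the sketch below (the `left_*` and `right_*` variables are hypothetical placeholders for the `DynamixelMotorsBus` instances you would create, as shown above for Koch):
-```python
-robot = ManipulatorRobot(
-    robot_type="aloha",
-    leader_arms={"left": left_leader_arm, "right": right_leader_arm},
-    follower_arms={"left": left_follower_arm, "right": right_follower_arm},
-    calibration_dir=".cache/calibration/aloha_default",  # default calibration shipped with LeRobot
-)
-```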
-
-**Calibrate and Connect the ManipulatorRobot**
-
-Next, you'll need to calibrate your Koch robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Koch robot to work on another.
-
-When you connect your robot for the first time, the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) will detect if the calibration file is missing and trigger the calibration procedure. During this process, you will be guided to move each arm to three different positions.
-
-Here are the positions you'll move the follower arm to:
-
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
-| | | |
-
-And here are the corresponding positions for the leader arm:
-
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
-| | | |
-
-You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details.
-
-During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure whether the values changed negatively or positively.
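-
-To give a rough intuition of what the offset does, here is a simplified sketch of the idea (not the exact code used in LeRobot; we assume 4096 encoder ticks per 360-degree turn):
-```python
-# Simplified illustration: recenter a raw motor reading around 0 degrees.
-# `homing_offset` and `drive_mode` play the same role as the values stored in the calibration file.
-TICKS_PER_TURN = 4096
-
-def ticks_to_degrees(raw_ticks: int, homing_offset: int, drive_mode: int) -> float:
-    sign = -1 if drive_mode else 1  # drive_mode flips motors that rotate the other way
-    return (sign * raw_ticks + homing_offset) / TICKS_PER_TURN * 360
-```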
-
-Finally, the rest position ensures that the follower and leader arms are roughly aligned after calibration, preventing sudden movements that could damage the motors when starting teleoperation.
-
-Importantly, once calibrated, all Koch robots will move to the same positions (e.g. zero and rotated position) when commanded.
-
-Run the following code to calibrate and connect your robot:
-```python
-robot.connect()
-```
-
-The output will look like this:
-```
-Connecting main follower arm
-Connecting main leader arm
-
-Missing calibration file '.cache/calibration/koch/main_follower.json'
-Running calibration of koch main follower...
-Move arm to zero position
-[...]
-Move arm to rotated position
-[...]
-Move arm to rest position
-[...]
-Calibration is done! Saving calibration file '.cache/calibration/koch/main_follower.json'
-
-Missing calibration file '.cache/calibration/koch/main_leader.json'
-Running calibration of koch main leader...
-Move arm to zero position
-[...]
-Move arm to rotated position
-[...]
-Move arm to rest position
-[...]
-Calibration is done! Saving calibration file '.cache/calibration/koch/main_leader.json'
-```
-
-*Verifying Calibration*
-
-Once calibration is complete, you can check the positions of the leader and follower arms to ensure they match. If the calibration was successful, the positions should be very similar.
-
-Run this code to get the positions in degrees:
-```python
-leader_pos = robot.leader_arms["main"].read("Present_Position")
-follower_pos = robot.follower_arms["main"].read("Present_Position")
-
-print(leader_pos)
-print(follower_pos)
-```
-
-Example output:
-```
-array([-0.43945312, 133.94531, 179.82422, -18.984375, -1.9335938, 34.541016], dtype=float32)
-array([-0.58723712, 131.72314, 174.98743, -16.872612, 0.786213, 35.271973], dtype=float32)
-```
-
-These values are in degrees, which makes them easier to interpret and debug. The zero position used during calibration should roughly correspond to 0 degrees for each motor, and the rotated position should roughly correspond to 90 degrees for each motor.
-
-**Teleoperate your Koch v1.1**
-
-You can easily teleoperate your robot by reading the positions from the leader arm and sending them as goal positions to the follower arm.
-
-To teleoperate your robot for 30 seconds at a frequency of approximately 200Hz, run the following code:
-```python
-import tqdm
-seconds = 30
-frequency = 200
-for _ in tqdm.tqdm(range(seconds*frequency)):
- leader_pos = robot.leader_arms["main"].read("Present_Position")
- robot.follower_arms["main"].write("Goal_Position", leader_pos)
-```
-
-*Using `teleop_step` for Teleoperation*
-
-Alternatively, you can teleoperate the robot using the `teleop_step` method from [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py).
-
-Run this code to teleoperate:
-```python
-for _ in tqdm.tqdm(range(seconds*frequency)):
- robot.teleop_step()
-```
-
-*Recording data during Teleoperation*
-
-Teleoperation is particularly useful for recording data. You can use `teleop_step(record_data=True)` to return both the follower arm's position as `"observation.state"` and the leader arm's position as `"action"`. This function also converts the numpy arrays into PyTorch tensors. If you're working with a robot that has two leader and two follower arms (like the Aloha), the positions are concatenated.
-
-Run the following code to see how slowly moving the leader arm affects the observation and action:
-```python
-leader_pos = robot.leader_arms["main"].read("Present_Position")
-follower_pos = robot.follower_arms["main"].read("Present_Position")
-observation, action = robot.teleop_step(record_data=True)
-
-print(follower_pos)
-print(observation)
-print(leader_pos)
-print(action)
-```
-
-Expected output:
-```
-array([7.8223, 131.1328, 165.5859, -23.4668, -0.9668, 32.4316], dtype=float32)
-{'observation.state': tensor([7.8223, 131.1328, 165.5859, -23.4668, -0.9668, 32.4316])}
-array([3.4277, 134.1211, 179.8242, -18.5449, -1.5820, 34.7168], dtype=float32)
-{'action': tensor([3.4277, 134.1211, 179.8242, -18.5449, -1.5820, 34.7168])}
-```
-
-*Asynchronous Frame Recording*
-
-Additionally, `teleop_step` can asynchronously record frames from multiple cameras and include them in the observation dictionary as `"observation.images.CAMERA_NAME"`. This feature will be covered in more detail in the next section.
-
-*Disconnecting the Robot*
-
-When you're finished, make sure to disconnect your robot by running:
-```python
-robot.disconnect()
-```
-
-Alternatively, you can unplug the power cord, which will also disable torque.
-
-*/!\ Warning*: These motors tend to overheat, especially under torque or if left plugged in for too long. Unplug after use.
-
-### c. Add your cameras with OpenCVCamera
-
-**(Optional) Use your phone as camera on Linux**
-
-If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera:
-
-1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
-```bash
-sudo apt install v4l2loopback-dkms v4l-utils
-```
-2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android.
-3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
-```bash
-flatpak install flathub com.obsproject.Studio
-```
-4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with:
-```bash
-flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
-```
-5. *Start OBS Studio*. Launch with:
-```bash
-flatpak run com.obsproject.Studio
-```
-6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
-7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
-8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
-9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices:
-```bash
-v4l2-ctl --list-devices
-```
-You should see an entry like:
-```
-VirtualCam (platform:v4l2loopback-000):
-/dev/video1
-```
-10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`.
-```bash
-v4l2-ctl -d /dev/video1 --get-fmt-video
-```
-You should see an entry like:
-```
->>> Format Video Capture:
->>> Width/Height : 640/480
->>> Pixel Format : 'YUYV' (YUYV 4:2:2)
-```
-
-Troubleshooting: If the resolution is not correct, you will have to delete the virtual camera port and try again, as it cannot be changed.
-
-If everything is set up correctly, you can proceed with the rest of the tutorial.
-
-**(Optional) Use your iPhone as a camera on MacOS**
-
-To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
-- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
-- Sign in both devices with the same Apple ID.
-- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.
-
-For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).
-
-Your iPhone should be detected automatically when running the camera setup script in the next section.
-
-**Instantiate an OpenCVCamera**
-
-The [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py) class allows you to efficiently record frames from most cameras using the [`opencv2`](https://docs.opencv.org) library. For more details on compatibility, see [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
-
-To instantiate an [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py), you need a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera, like a laptop webcam, the camera index is usually `0`, but it might differ, and it might change if you reboot your computer or re-plug your camera. This behavior depends on your operating system.
-
-To find the camera indices, run the following utility script, which will save a few frames from each detected camera:
-```bash
-python lerobot/common/robot_devices/cameras/opencv.py \
- --images-dir outputs/images_from_opencv_cameras
-```
-
-The output will look something like this if you have two cameras connected:
-```
-Mac or Windows detected. Finding available camera indices through scanning all indices from 0 to 60
-[...]
-Camera found at index 0
-Camera found at index 1
-[...]
-Connecting cameras
-OpenCVCamera(0, fps=30.0, width=1920.0, height=1080.0, color_mode=rgb)
-OpenCVCamera(1, fps=24.0, width=1920.0, height=1080.0, color_mode=rgb)
-Saving images to outputs/images_from_opencv_cameras
-Frame: 0000 Latency (ms): 39.52
-[...]
-Frame: 0046 Latency (ms): 40.07
-Images have been saved to outputs/images_from_opencv_cameras
-```
-
-Check the saved images in `outputs/images_from_opencv_cameras` to identify which camera index corresponds to which physical camera (e.g. `0` for `camera_00` or `1` for `camera_01`):
-```
-camera_00_frame_000000.png
-[...]
-camera_00_frame_000047.png
-camera_01_frame_000000.png
-[...]
-camera_01_frame_000047.png
-```
-
-Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green.
-
-Finally, run this code to instantiate and connect your camera:
-```python
-from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera
-
-camera = OpenCVCamera(camera_index=0)
-camera.connect()
-color_image = camera.read()
-
-print(color_image.shape)
-print(color_image.dtype)
-```
-
-Expected output for a laptop camera on a MacBook Pro:
-```
-(1080, 1920, 3)
-uint8
-```
-
-Or like this if you followed our tutorial to set up a virtual camera:
-```
-(480, 640, 3)
-uint8
-```
-
-With certain cameras, you can also specify additional parameters like frame rate, resolution, and color mode during instantiation. For instance:
-```python
-camera = OpenCVCamera(camera_index=0, fps=30, width=640, height=480)
-```
-
-If the provided arguments are not compatible with the camera, an exception will be raised.
-
-*Disconnecting the camera*
-
-When you're done using the camera, disconnect it by running:
-```python
-camera.disconnect()
-```
-
-**Instantiate your robot with cameras**
-
-Additionally, you can set up your robot to work with your cameras.
-
-Modify the following Python code with the appropriate camera names and configurations:
-```python
-robot = ManipulatorRobot(
- leader_arms={"main": leader_arm},
- follower_arms={"main": follower_arm},
- calibration_dir=".cache/calibration/koch",
- cameras={
- "laptop": OpenCVCamera(0, fps=30, width=640, height=480),
- "phone": OpenCVCamera(1, fps=30, width=640, height=480),
- },
-)
-robot.connect()
-```
-
-As a result, `teleop_step(record_data=True)` will return a frame for each camera following the pytorch "channel first" convention, but we keep images in `uint8` with pixels in the range [0, 255] to easily save them.
-
-Modify this code with the names of your cameras and run it:
-```python
-observation, action = robot.teleop_step(record_data=True)
-print(observation["observation.images.laptop"].shape)
-print(observation["observation.images.phone"].shape)
-print(observation["observation.images.laptop"].min().item())
-print(observation["observation.images.laptop"].max().item())
-```
-
-The output should look like this:
-```
-torch.Size([3, 480, 640])
-torch.Size([3, 480, 640])
-0
-255
-```
-
-Also, update the following lines of the yaml file for Koch robot [`lerobot/configs/robot/koch.yaml`](../lerobot/configs/robot/koch.yaml) with the names and configurations of your cameras:
-```yaml
-[...]
-cameras:
- laptop:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: 0
- fps: 30
- width: 640
- height: 480
- phone:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: 1
- fps: 30
- width: 640
- height: 480
-```
-
-This file is used to instantiate your robot in all our scripts. We will explain how this works in the next section.
-
-### d. Use `koch.yaml` and our `teleoperate` function
-
-Instead of manually running the python code in a terminal window, you can use [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to instantiate your robot by providing the path to the robot yaml file (e.g. [`lerobot/configs/robot/koch.yaml`](../lerobot/configs/robot/koch.yaml)) and control your robot with various modes as explained next.
-
-Try running this code to teleoperate your robot (if you don't have a camera, keep reading):
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/koch.yaml
-```
-
-You will see a lot of lines appearing like this one:
-```
-INFO 2024-08-10 11:15:03 ol_robot.py:209 dt: 5.12 (195.1hz) dtRlead: 4.93 (203.0hz) dtRfoll: 0.19 (5239.0hz)
-```
-
-It contains:
-- `2024-08-10 11:15:03` which is the date and time of the call to the print function.
-- `ol_robot.py:209` which is the end of the file name and the line number where the print function is called (`lerobot/scripts/control_robot.py` line `209`).
-- `dt: 5.12 (195.1hz)` which is the "delta time", i.e. the number of milliseconds spent between the previous call to `robot.teleop_step()` and the current one, together with the corresponding frequency (5.12 ms equals 195.1 Hz); note that you can cap the frequency by adding an fps argument such as `--fps 30`. See the short conversion example after this list.
-- `dtRlead: 4.93 (203.0hz)` which is the number of milliseconds it took to read the position of the leader arm using `leader_arm.read("Present_Position")`.
-- `dtWfoll: 0.22 (4446.9hz)` which is the number of milliseconds it took to set a new goal position for the follower arm using `follower_arm.write("Goal_Position", leader_pos)`; note that writing is done asynchronously, so it takes less time than reading.
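-
-The conversion between the delta time and the printed frequency is plain arithmetic (not code from `lerobot`), for example:
-```python
-# Convert a delta time in milliseconds to a frequency in Hz.
-dt_ms = 5.12
-freq_hz = 1000 / dt_ms
-print(f"{freq_hz:.1f} Hz")  # ~195.3 Hz, close to the 195.1hz printed in the log above
-```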
-
-Note: you can override any entry in the yaml file using `--robot-overrides` and the [hydra.cc](https://hydra.cc/docs/advanced/override_grammar/basic) syntax. If needed, you can override the ports like this:
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/koch.yaml \
- --robot-overrides \
- leader_arms.main.port=/dev/tty.usbmodem575E0031751 \
- follower_arms.main.port=/dev/tty.usbmodem575E0032081
-```
-
-Importantly: If you don't have any camera, you can remove them dynamically with this [hydra.cc](https://hydra.cc/docs/advanced/override_grammar/basic) syntax `'~cameras'`:
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/koch.yaml \
- --robot-overrides \
- '~cameras'
-```
-
-We advise creating a new yaml file when the command becomes too long.
-
-## 3. Record your Dataset and Visualize it
-
-Using what you've learned previously, you can now easily record a dataset of states and actions for one episode. You can use `busy_wait` to control the speed of teleoperation and record at a fixed `fps` (frames per second).
-
-Try this code to record 30 seconds at 60 fps:
-```python
-import time
-from lerobot.scripts.control_robot import busy_wait
-
-record_time_s = 30
-fps = 60
-
-states = []
-actions = []
-for _ in range(record_time_s * fps):
- start_time = time.perf_counter()
- observation, action = robot.teleop_step(record_data=True)
-
- states.append(observation["observation.state"])
- actions.append(action["action"])
-
- dt_s = time.perf_counter() - start_time
- busy_wait(1 / fps - dt_s)
-
-# Note that observation and action are available in RAM, but
-# you could potentially store them on disk with pickle/hdf5 or
-# our optimized format `LeRobotDataset`. More on this next.
-```
-
-Importantly, many utilities are still missing. For instance, if you have cameras, you will need to save the images to disk so you don't run out of RAM, and to do so in threads so you don't slow down communication with your robot. Also, you will need to store your data in a format optimized for training and web sharing like [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py). More on this in the next section.
-
-### a. Use `koch.yaml` and the `record` function
-
-You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to achieve efficient data recording. It encompasses many recording utilities:
-1. Frames from cameras are saved on disk in threads, and encoded into videos at the end of recording.
-2. Video streams from cameras are displayed in a window so that you can verify them.
-3. Data is stored in the [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py) format, which is pushed to your Hugging Face page (unless `--push-to-hub 0` is provided).
-4. Checkpoints are saved during recording, so if any issue occurs, you can resume recording by re-running the same command. You can also use `--force-override 1` to start recording from scratch.
-5. Set the flow of data recording using command line arguments:
- - `--warmup-time-s` defines the number of seconds before starting data collection. It allows the robot devices to warmup and synchronize (10 seconds by default).
- - `--episode-time-s` defines the number of seconds for data recording for each episode (60 seconds by default).
- - `--reset-time-s` defines the number of seconds for resetting the environment after each episode (60 seconds by default).
- - `--num-episodes` defines the number of episodes to record (50 by default).
-6. Control the flow during data recording using keyboard keys:
- Press right arrow `->` at any time during episode recording to stop early and go to resetting. The same applies during resetting: press it to stop early and go to the next episode recording.
- Press left arrow `<-` at any time during episode recording or resetting to stop early, cancel the current episode, and re-record it.
- - Press escape `ESC` at any time during episode recording to end the session early and go straight to video encoding and dataset uploading.
-7. Similarly to `teleoperate`, you can also use `--robot-path` and `--robot-overrides` to specify your robots.
-
-Before trying `record`, if you want to push your dataset to the hub, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
-```bash
-huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
-```
-Also, store your Hugging Face repository name in a variable (e.g. `cadene` or `lerobot`). For instance, run this to use your Hugging Face user name as repository:
-```bash
-HF_USER=$(huggingface-cli whoami | head -n 1)
-echo $HF_USER
-```
-If you don't want to push to hub, use `--push-to-hub 0`.
-
-Now run this to record 2 episodes:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/koch.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/koch_test \
- --tags tutorial \
- --warmup-time-s 5 \
- --episode-time-s 30 \
- --reset-time-s 30 \
- --num-episodes 2
-```
-
-This will write your dataset locally to `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `data/cadene/koch_test`) and push it on the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
-
-You can look for other LeRobot datasets on the hub by searching for `LeRobot` tags: https://huggingface.co/datasets?other=LeRobot
-
-Remember to add `--robot-overrides '~cameras'` if you don't have any cameras and you still use the default `koch.yaml` configuration.
-
-You will see a lot of lines appearing like this one:
-```
-INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz)
-```
-It contains:
-- `2024-08-10 15:02:58` which is the date and time of the call to the print function,
-- `ol_robot.py:219` which is the end of the file name and the line number where the print function is called (`lerobot/scripts/control_robot.py` line `219`).
-- `dt:33.34 (30.0hz)` which is the "delta time", i.e. the number of milliseconds spent between the previous call to `robot.teleop_step(record_data=True)` and the current one, together with the corresponding frequency (33.34 ms equals 30.0 Hz); note that we use `--fps 30`, so we expect 30.0 Hz; when a step takes more time, the line appears in yellow.
-- `dtRlead: 5.06 (197.5hz)` which is the delta time of reading the present position of the leader arm.
-- `dtWfoll: 0.25 (3963.7hz)` which is the delta time of writing the goal position on the follower arm; writing is asynchronous, so it takes less time than reading.
-- `dtRfoll: 6.22 (160.7hz)` which is the delta time of reading the present position on the follower arm.
-- `dtRlaptop: 32.57 (30.7hz)` which is the delta time of capturing an image from the laptop camera in the thread running asynchronously.
-- `dtRphone: 33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously.
-
-Troubleshooting:
-- On Linux, if you encounter a hanging issue when using cameras, uninstall opencv and re-install it with conda:
-```bash
-pip uninstall opencv-python
-conda install -c conda-forge opencv=4.10.0
-```
-- On Linux, if you encounter any issue during video encoding with `ffmpeg: unknown encoder libsvtav1`, you can:
- - install with conda-forge by running `conda install -c conda-forge ffmpeg` (it should be compiled with `libsvtav1`),
- - or, install [Homebrew](https://brew.sh) and run `brew install ffmpeg` (it should be compiled with `libsvtav1`),
- - or, install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1),
- - and, make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
-- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
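-
-For example, you can quickly check whether the variable is set (the `:0` value below is only a typical example; use whatever your own session reports):
-```bash
-echo $DISPLAY       # should print something like :0 when an X session is available
-export DISPLAY=:0   # example only, adjust to the value of your actual display
-```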
-
-At the end of data recording, your dataset will be uploaded on your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/koch_test) that you can obtain by running:
-```bash
-echo https://huggingface.co/datasets/${HF_USER}/koch_test
-```
-
-### b. Advice for recording a dataset
-
-Once you're comfortable with data recording, it's time to create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings.
-
-In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
-
-Avoid adding too much variation too quickly, as it may hinder your results.
-
-In the coming months, we plan to release a foundational model for robotics. We anticipate that fine-tuning this model will enhance generalization, reducing the need for strict consistency during data collection.
-
-### c. Visualize all episodes
-
-You can visualize your dataset by running:
-```bash
-python lerobot/scripts/visualize_dataset_html.py \
- --repo-id ${HF_USER}/koch_test
-```
-
-This will launch a local web server that looks like this:
-
-
-
-
-### d. Replay episode on your robot with the `replay` function
-
-A useful feature of [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) is the `replay` function, which allows you to replay on your robot any episode that you've recorded, or episodes from any other dataset. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.
-
-To replay the first episode of the dataset you just recorded, run the following command:
-```bash
-python lerobot/scripts/control_robot.py replay \
- --robot-path lerobot/configs/robot/koch.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/koch_test \
- --episode 0
-```
-
-Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).
-
-## 4. Train a policy on your data
-
-### a. Use the `train` script
-
-To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
-```bash
-python lerobot/scripts/train.py \
- dataset_repo_id=${HF_USER}/koch_test \
- policy=act_koch_real \
- env=koch_real \
- hydra.run.dir=outputs/train/act_koch_test \
- hydra.job.name=act_koch_test \
- device=cuda \
- wandb.enable=true
-```
-
-Let's explain it:
-1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/koch_test`.
-2. We provided the policy with `policy=act_koch_real`. This loads configurations from [`lerobot/configs/policy/act_koch_real.yaml`](../lerobot/configs/policy/act_koch_real.yaml). Importantly, this policy uses 2 cameras as input `laptop` and `phone`. If your dataset has different cameras, update the yaml file to account for it in the following parts:
-```yaml
-...
-override_dataset_stats:
- observation.images.laptop:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
- observation.images.phone:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-...
- input_shapes:
- observation.images.laptop: [3, 480, 640]
- observation.images.phone: [3, 480, 640]
-...
- input_normalization_modes:
- observation.images.laptop: mean_std
- observation.images.phone: mean_std
-...
-```
-3. We provided an environment as argument with `env=koch_real`. This loads configurations from [`lerobot/configs/env/koch_real.yaml`](../lerobot/configs/env/koch_real.yaml). It looks like
-```yaml
-fps: 30
-env:
- name: real_world
- task: null
- state_dim: 6
- action_dim: 6
- fps: ${fps}
-```
-It should match your dataset (e.g. `fps: 30`) and your robot (e.g. `state_dim: 6` and `action_dim: 6`). We are still working on simplifying this in future versions of `lerobot`.
-4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
-5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
-
-For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
-
-### b. (Optional) Upload policy checkpoints to the hub
-
-Once training is done, upload the latest checkpoint with:
-```bash
-huggingface-cli upload ${HF_USER}/act_koch_test \
- outputs/train/act_koch_test/checkpoints/last/pretrained_model
-```
-
-You can also upload intermediate checkpoints with:
-```bash
-CKPT=010000
-huggingface-cli upload ${HF_USER}/act_koch_test_${CKPT} \
- outputs/train/act_koch_test/checkpoints/${CKPT}/pretrained_model
-```
-
-## 5. Evaluate your policy
-
-Now that you have a policy checkpoint, you can easily control your robot with it using methods from [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) and the policy.
-
-Try this code for running inference for 60 seconds at 30 fps:
-```python
-# `time`, `torch` and `busy_wait` are needed below (you can skip these imports if you are
-# continuing the same interactive session from the previous sections).
-import time
-
-import torch
-
-from lerobot.common.policies.act.modeling_act import ACTPolicy
-from lerobot.scripts.control_robot import busy_wait
-
-inference_time_s = 60
-fps = 30
-device = "cuda" # TODO: On Mac, use "mps" or "cpu"
-
-ckpt_path = "outputs/train/act_koch_test/checkpoints/last/pretrained_model"
-policy = ACTPolicy.from_pretrained(ckpt_path)
-policy.to(device)
-
-for _ in range(inference_time_s * fps):
- start_time = time.perf_counter()
-
- # Read the follower state and access the frames from the cameras
- observation = robot.capture_observation()
-
- # Convert to pytorch format: channel first and float32 in [0,1]
- # with batch dimension
- for name in observation:
- if "image" in name:
- observation[name] = observation[name].type(torch.float32) / 255
- observation[name] = observation[name].permute(2, 0, 1).contiguous()
- observation[name] = observation[name].unsqueeze(0)
- observation[name] = observation[name].to(device)
-
- # Compute the next action with the policy
- # based on the current observation
- action = policy.select_action(observation)
- # Remove batch dimension
- action = action.squeeze(0)
- # Move to cpu, if not already the case
- action = action.to("cpu")
- # Order the robot to move
- robot.send_action(action)
-
- dt_s = time.perf_counter() - start_time
- busy_wait(1 / fps - dt_s)
-```
-
-### a. Use `koch.yaml` and our `record` function
-
-Ideally, when controlling your robot with your neural network, you would want to record evaluation episodes and to be able to visualize them later on, or even train on them like in Reinforcement Learning. This pretty much corresponds to recording a new dataset but with a neural network providing the actions instead of teleoperation.
-
-To this end, you can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/koch.yaml \
- --fps 30 \
- --repo-id ${HF_USER}/eval_koch_test \
- --tags tutorial eval \
- --warmup-time-s 5 \
- --episode-time-s 30 \
- --reset-time-s 30 \
- --num-episodes 10 \
- -p outputs/train/act_koch_test/checkpoints/last/pretrained_model
-```
-
-As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
-1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_koch_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_koch_test`).
-2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_koch_test`).
-
-### b. Visualize evaluation afterwards
-
-You can then visualize your evaluation dataset by running the same command as before but with the new inference dataset as argument:
-```bash
-python lerobot/scripts/visualize_dataset.py \
- --repo-id ${HF_USER}/eval_koch_test
-```
-
-## 6. Next step
-
-Join our [Discord](https://discord.com/invite/s3KuuzsPFb) to collaborate on data collection and help us train a fully open-source foundational model for robotics!
diff --git a/examples/8_use_stretch.md b/examples/8_use_stretch.md
deleted file mode 100644
index c2c306f071..0000000000
--- a/examples/8_use_stretch.md
+++ /dev/null
@@ -1,156 +0,0 @@
-This tutorial explains how to use [Stretch 3](https://hello-robot.com/stretch-3-product) with LeRobot.
-
-## Setup
-
-Familiarize yourself with Stretch by following its [tutorials](https://docs.hello-robot.com/0.3/getting_started/hello_robot/) (recommended).
-
-To use LeRobot on Stretch, 3 options are available:
-- [tethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup)
-- [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup)
-- ssh directly into Stretch (you will first need to install and configure openssh-server on Stretch using one of the two setups above)
-
-
-## Install LeRobot
-
-On Stretch's CLI, follow these steps:
-
-1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
-```bash
-mkdir -p ~/miniconda3
-wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
-bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
-rm ~/miniconda3/miniconda.sh
-~/miniconda3/bin/conda init bash
-```
-
-2. Comment out these lines in `~/.profile` (they can mess up the paths used by conda, and `~/.local/bin` should already be in your PATH):
-```
-# set PATH so it includes user's private bin if it exists
-if [ -d "$HOME/.local/bin" ] ; then
- PATH="$HOME/.local/bin:$PATH"
-fi
-```
-
-3. Restart shell or `source ~/.bashrc`
-
-4. Create and activate a fresh conda environment for lerobot
-```bash
-conda create -y -n lerobot python=3.10 && conda activate lerobot
-```
-
-5. Clone LeRobot:
-```bash
-git clone https://github.com/huggingface/lerobot.git ~/lerobot
-```
-
-6. Install LeRobot with stretch dependencies:
-```bash
-cd ~/lerobot && pip install -e ".[stretch]"
-```
-
-> **Note:** If you get this message, you can ignore it: `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.`
-
-For Linux only (not Mac), install extra dependencies for recording datasets:
-```bash
-conda install -y -c conda-forge ffmpeg
-pip uninstall -y opencv-python
-conda install -y -c conda-forge "opencv>=4.10.0"
-```
-
-7. Run a [system check](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#system-check) to make sure your robot is ready:
-```bash
-stretch_system_check.py
-```
-
-> **Note:** You may need to free the "robot process" after booting Stretch by running `stretch_free_robot_process.py`. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#turning-off-gamepad-teleoperation).
-
-You should get something like this:
-```bash
-For use with S T R E T C H (R) from Hello Robot Inc.
----------------------------------------------------------------------
-
-Model = Stretch 3
-Tool = DexWrist 3 w/ Gripper
-Serial Number = stretch-se3-3054
-
----- Checking Hardware ----
-[Pass] Comms are ready
-[Pass] Actuators are ready
-[Warn] Sensors not ready (IMU AZ = -10.19 out of range -10.1 to -9.5)
-[Pass] Battery voltage is 13.6 V
-
----- Checking Software ----
-[Pass] Ubuntu 22.04 is ready
-[Pass] All APT pkgs are setup correctly
-[Pass] Firmware is up-to-date
-[Pass] Python pkgs are up-to-date
-[Pass] ROS2 Humble is ready
-```
-
-## Teleoperate, record a dataset and run a policy
-
-**Calibrate (Optional)**
-Before operating Stretch, you first need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it. Make sure to give Stretch some space, as this procedure will move the robot's arm and gripper. Now run this command:
-```bash
-python lerobot/scripts/control_robot.py calibrate \
- --robot-path lerobot/configs/robot/stretch.yaml
-```
-This is equivalent to running `stretch_robot_home.py`
-
-> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
-
-**Teleoperate**
-Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
-
-Now try out teleoperation (see above documentation to learn about the gamepad controls):
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/stretch.yaml
-```
-This is essentially the same as running `stretch_gamepad_teleop.py`
-
-**Record a dataset**
-Once you're familiar with the gamepad controls and after a bit of practice, you can try to record your first dataset with Stretch.
-
-If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
-```bash
-huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
-```
-
-Store your Hugging Face repository name in a variable to run these commands:
-```bash
-HF_USER=$(huggingface-cli whoami | head -n 1)
-echo $HF_USER
-```
-
-Record one episode:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/stretch.yaml \
- --fps 20 \
- --repo-id ${HF_USER}/stretch_test \
- --tags stretch tutorial \
- --warmup-time-s 3 \
- --episode-time-s 40 \
- --reset-time-s 10 \
- --num-episodes 1 \
- --push-to-hub 0
-```
-
-> **Note:** If you're using ssh to connect to Stretch and run this script, you won't be able to visualize its camera feeds (though they will still be recorded). To see the camera streams, use a [tethered](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) or [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup).
-
-**Replay an episode**
-Now try to replay this episode (make sure the robot's initial position is the same):
-```bash
-python lerobot/scripts/control_robot.py replay \
- --robot-path lerobot/configs/robot/stretch.yaml \
- --fps 20 \
- --repo-id ${HF_USER}/stretch_test \
- --episode 0
-```
-
-Follow the [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) to train a policy on your data and run inference on your robot. You will need to adapt the code for Stretch.
-
-> TODO(rcadene, aliberts): Add already setup environment and policy yaml configuration files
-
-If you need help, please reach out on Discord in the channel `#stretch3-mobile-arm`.
diff --git a/examples/9_use_aloha.md b/examples/9_use_aloha.md
deleted file mode 100644
index f531a2c1d5..0000000000
--- a/examples/9_use_aloha.md
+++ /dev/null
@@ -1,174 +0,0 @@
-This tutorial explains how to use [Aloha and Aloha 2 stationary](https://www.trossenrobotics.com/aloha-stationary) with LeRobot.
-
-## Setup
-
-Follow the [documentation from Trossen Robotics](https://docs.trossenrobotics.com/aloha_docs/getting_started/stationary/hardware_setup.html) for setting up the hardware and plugging the 4 arms and 4 cameras to your computer.
-
-
-## Install LeRobot
-
-On your computer:
-
-1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
-```bash
-mkdir -p ~/miniconda3
-wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
-bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
-rm ~/miniconda3/miniconda.sh
-~/miniconda3/bin/conda init bash
-```
-
-2. Restart shell or `source ~/.bashrc`
-
-3. Create and activate a fresh conda environment for lerobot
-```bash
-conda create -y -n lerobot python=3.10 && conda activate lerobot
-```
-
-4. Clone LeRobot:
-```bash
-git clone https://github.com/huggingface/lerobot.git ~/lerobot
-```
-
-5. Install LeRobot with dependencies for the Aloha motors (dynamixel) and cameras (intelrealsense):
-```bash
-cd ~/lerobot && pip install -e ".[dynamixel, intelrealsense]"
-```
-
-For Linux only (not Mac), install extra dependencies for recording datasets:
-```bash
-conda install -y -c conda-forge ffmpeg
-pip uninstall -y opencv-python
-conda install -y -c conda-forge "opencv>=4.10.0"
-```
-
-## Teleoperate
-
-**/!\ FOR SAFETY, READ THIS /!\**
-Teleoperation consists of manually operating the leader arms to move the follower arms. Importantly:
-1. Make sure your leader arms are in the same position as the follower arms, so that the follower arms don't move too fast to match the leader arms,
-2. Our code assumes that your robot has been assembled following Trossen Robotics instructions. This allows us to skip calibration, as we use the pre-defined calibration files in `.cache/calibration/aloha_default`. If you replace a motor, make sure you follow the exact instructions from Trossen Robotics.
-
-By running the following code, you can start your first **SAFE** teleoperation:
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/aloha.yaml \
- --robot-overrides max_relative_target=5
-```
-
-By adding `--robot-overrides max_relative_target=5`, we override the default value for `max_relative_target` defined in `lerobot/configs/robot/aloha.yaml`. It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot-overrides max_relative_target=null` to the command line:
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --robot-path lerobot/configs/robot/aloha.yaml \
- --robot-overrides max_relative_target=null
-```
-
-## Record a dataset
-
-Once you're familiar with teleoperation, you can record your first dataset with Aloha.
-
-If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
-```bash
-huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
-```
-
-Store your Hugging Face repository name in a variable to run these commands:
-```bash
-HF_USER=$(huggingface-cli whoami | head -n 1)
-echo $HF_USER
-```
-
-Record 2 episodes and upload your dataset to the hub:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/aloha.yaml \
- --robot-overrides max_relative_target=null \
- --fps 30 \
- --repo-id ${HF_USER}/aloha_test \
- --tags aloha tutorial \
- --warmup-time-s 5 \
- --episode-time-s 40 \
- --reset-time-s 10 \
- --num-episodes 2 \
- --push-to-hub 1
-```
-
-## Visualize a dataset
-
-If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id, which is given by:
-```bash
-echo ${HF_USER}/aloha_test
-```
-
-If you didn't upload your dataset (i.e. you used `--push-to-hub 0`), you can still visualize it locally with:
-```bash
-python lerobot/scripts/visualize_dataset_html.py \
- --repo-id ${HF_USER}/aloha_test
-```
-
-## Replay an episode
-
-**/!\ FOR SAFETY, READ THIS /!\**
-Replay consists of automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast to go to the first goal positions. For safety, you might want to add `--robot-overrides max_relative_target=5` to your command line as explained above.
-
-Now try to replay the first episode on your robot:
-```bash
-python lerobot/scripts/control_robot.py replay \
- --robot-path lerobot/configs/robot/aloha.yaml \
- --robot-overrides max_relative_target=null \
- --fps 30 \
- --repo-id ${HF_USER}/aloha_test \
- --episode 0
-```
-
-## Train a policy
-
-To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
-```bash
-python lerobot/scripts/train.py \
- dataset_repo_id=${HF_USER}/aloha_test \
- policy=act_aloha_real \
- env=aloha_real \
- hydra.run.dir=outputs/train/act_aloha_test \
- hydra.job.name=act_aloha_test \
- device=cuda \
- wandb.enable=true
-```
-
-Let's explain it:
-1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/aloha_test`.
-2. We provided the policy with `policy=act_aloha_real`. This loads configurations from [`lerobot/configs/policy/act_aloha_real.yaml`](../lerobot/configs/policy/act_aloha_real.yaml). Importantly, this policy uses 4 cameras as input `cam_right_wrist`, `cam_left_wrist`, `cam_high`, and `cam_low`.
-3. We provided an environment as argument with `env=aloha_real`. This loads configurations from [`lerobot/configs/env/aloha_real.yaml`](../lerobot/configs/env/aloha_real.yaml). Note: this yaml defines 18 dimensions for the `state_dim` and `action_dim`, corresponding to 18 motors, not 14 motors as used in previous Aloha work. This is because we include the `shoulder_shadow` and `elbow_shadow` motors for simplicity.
-4. We provided `device=cuda` since we are training on a Nvidia GPU.
-5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
-
-Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`.
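-
-If you want to quickly sanity-check the resulting checkpoint outside of the robot control loop, you can load it directly in Python. The following is a minimal sketch (not one of the commands above), assuming the checkpoint directory exists and that your installed version of LeRobot exposes `ACTPolicy.from_pretrained`:
-```python
-import torch
-
-from lerobot.common.policies.act.modeling_act import ACTPolicy
-
-# Checkpoint directory written by the training run above (assumed to exist).
-pretrained_policy_path = "outputs/train/act_aloha_test/checkpoints/last/pretrained_model"
-
-policy = ACTPolicy.from_pretrained(pretrained_policy_path)
-policy.eval()
-policy.to("cuda" if torch.cuda.is_available() else "cpu")
-
-# Print the policy configuration (e.g. input shapes and chunk size) as a quick sanity check.
-print(policy.config)
-```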
-
-## Evaluate your policy
-
-You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
-```bash
-python lerobot/scripts/control_robot.py record \
- --robot-path lerobot/configs/robot/aloha.yaml \
- --robot-overrides max_relative_target=null \
- --fps 30 \
- --repo-id ${HF_USER}/eval_act_aloha_test \
- --tags aloha tutorial eval \
- --warmup-time-s 5 \
- --episode-time-s 40 \
- --reset-time-s 10 \
- --num-episodes 10 \
- --num-image-writer-processes 1 \
- -p outputs/train/act_aloha_test/checkpoints/last/pretrained_model
-```
-
-As you can see, it's almost the same command as previously used to record your training dataset. Three things changed:
-1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_aloha_test`).
-2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_aloha_test`).
-3. We use `--num-image-writer-processes 1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows us to reach a consistent 30 fps during inference. Feel free to explore different values for `--num-image-writer-processes`.
-
-## More
-
-Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation.
-
-If you have any questions or need help, please reach out on Discord in the channel `#aloha-arm`.
diff --git a/examples/advanced/1_train_act_pusht/act_pusht.yaml b/examples/advanced/1_train_act_pusht/act_pusht.yaml
deleted file mode 100644
index 4963e11c02..0000000000
--- a/examples/advanced/1_train_act_pusht/act_pusht.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-# @package _global_
-
-# Change the seed to match what PushT eval uses
-# (to avoid evaluating on seeds used for generating the training data).
-seed: 100000
-# Change the dataset repository to the PushT one.
-dataset_repo_id: lerobot/pusht
-
-override_dataset_stats:
- observation.image:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-training:
- offline_steps: 80000
- online_steps: 0
- eval_freq: 10000
- save_freq: 100000
- log_freq: 250
- save_model: true
-
- batch_size: 8
- lr: 1e-5
- lr_backbone: 1e-5
- weight_decay: 1e-4
- grad_clip_norm: 10
- online_steps_between_rollouts: 1
-
- delta_timestamps:
- action: "[i / ${fps} for i in range(${policy.chunk_size})]"
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-# See `configuration_act.py` for more details.
-policy:
- name: act
-
- # Input / output structure.
- n_obs_steps: 1
- chunk_size: 100 # chunk_size
- n_action_steps: 100
-
- input_shapes:
- observation.image: [3, 96, 96]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.image: mean_std
- # Use min_max normalization just because it's more standard.
- observation.state: min_max
- output_normalization_modes:
- # Use min_max normalization just because it's more standard.
- action: min_max
-
- # Architecture.
- # Vision backbone.
- vision_backbone: resnet18
- pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
- replace_final_stride_with_dilation: false
- # Transformer layers.
- pre_norm: false
- dim_model: 512
- n_heads: 8
- dim_feedforward: 3200
- feedforward_activation: relu
- n_encoder_layers: 4
- # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
- # that means only the first layer is used. Here we match the original implementation by setting this to 1.
- # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
- n_decoder_layers: 1
- # VAE.
- use_vae: true
- latent_dim: 32
- n_vae_encoder_layers: 4
-
- # Inference.
- temporal_ensemble_coeff: null
-
- # Training and loss computation.
- dropout: 0.1
- kl_weight: 10.0
diff --git a/examples/advanced/1_train_act_pusht/train_act_pusht.md b/examples/advanced/1_train_act_pusht/train_act_pusht.md
deleted file mode 100644
index 0258c9916b..0000000000
--- a/examples/advanced/1_train_act_pusht/train_act_pusht.md
+++ /dev/null
@@ -1,70 +0,0 @@
-In this tutorial we will learn how to adapt a policy configuration to be compatible with a new environment and dataset. As a concrete example, we will adapt the default configuration for ACT to be compatible with the PushT environment and dataset.
-
-If you haven't already read our tutorial on the [training script and configuration tooling](../4_train_policy_with_script.md) please do so prior to tackling this tutorial.
-
-Let's get started!
-
-Suppose we want to train ACT for PushT. Well, there are aspects of the ACT configuration that are specific to the ALOHA environments, and these happen to be incompatible with PushT. Therefore, trying to run the following will almost certainly raise an exception of sorts (e.g. a feature dimension mismatch):
-
-```bash
-python lerobot/scripts/train.py policy=act env=pusht dataset_repo_id=lerobot/pusht
-```
-
-We need to adapt the parameters of the ACT policy configuration to the PushT environment. The most important ones are the image keys.
-
-ALOHA's datasets and environments typically use a variable number of cameras. In `lerobot/configs/policy/act.yaml` you may notice two relevant sections. Here we show you the minimal diff needed to adjust to PushT:
-
-```diff
-override_dataset_stats:
-- observation.images.top:
-+ observation.image:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-policy:
- input_shapes:
-- observation.images.top: [3, 480, 640]
-+ observation.image: [3, 96, 96]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- input_normalization_modes:
-- observation.images.top: mean_std
-+ observation.image: mean_std
- observation.state: min_max
- output_normalization_modes:
- action: min_max
-```
-
-Here we've accounted for the following:
-- PushT uses "observation.image" for its image key.
-- PushT provides smaller images.
-
-_Side note: technically we could override these via the CLI, but with many changes it gets a bit messy, and we also have a bit of a challenge in that we're using `.` in our observation keys, which Hydra treats as a hierarchical separator_.
-
-For your convenience, we provide [`act_pusht.yaml`](./act_pusht.yaml) in this directory. It contains the diff above, plus some other (optional) ones that are explained within. Please copy it into `lerobot/configs/policy` with:
-
-```bash
-cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/act_pusht.yaml
-```
-
-(remember from a [previous tutorial](../4_train_policy_with_script.md) that Hydra will look in the `lerobot/configs` directory). Now try running the following.
-
-
-```bash
-python lerobot/scripts/train.py policy=act_pusht env=pusht
-```
-
-Notice that this is much the same as the command that failed at the start of the tutorial, only:
-- Now we are using `policy=act_pusht` to point to our new configuration file.
-- We can drop `dataset_repo_id=lerobot/pusht` as the change is incorporated in our new configuration file.
-
-Hurrah! You're now training ACT for the PushT environment.
-
----
-
-The bottom line of this tutorial is that when training policies for different environments and datasets you will need to understand what parts of the policy configuration are specific to those and make changes accordingly.
-
-Happy coding! 🤗
diff --git a/examples/advanced/2_calculate_validation_loss.py b/examples/advanced/2_calculate_validation_loss.py
deleted file mode 100644
index 00ba9930fa..0000000000
--- a/examples/advanced/2_calculate_validation_loss.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""This script demonstrates how to slice a dataset and calculate the loss on a subset of the data.
-
-This technique can be useful for debugging and testing purposes, as well as identifying whether a policy
-is learning effectively.
-
-However, note that relying on validation loss to evaluate performance is generally not considered a good practice,
-especially in the context of imitation learning. The most reliable approach is to evaluate the policy directly
-on the target environment, whether that be in simulation or the real world.
-"""
-
-import math
-from pathlib import Path
-
-import torch
-from huggingface_hub import snapshot_download
-
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
-from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
-
-device = torch.device("cuda")
-
-# Download the diffusion policy for pusht environment
-pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht"))
-# OR uncomment the following to evaluate a policy from the local outputs/train folder.
-# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")
-
-policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
-policy.eval()
-policy.to(device)
-
-# Set up the dataset.
-delta_timestamps = {
- # Load the previous image and state at -0.1 seconds before current frame,
- # then load current image and state corresponding to 0.0 second.
- "observation.image": [-0.1, 0.0],
- "observation.state": [-0.1, 0.0],
- # Load the previous action (-0.1), the next action to be executed (0.0),
-    # and 14 future actions with a 0.1 second spacing. All these actions will be
- # used to calculate the loss.
- "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
-}
-
-# Load the last 10% of episodes of the dataset as a validation set.
-# - Load dataset metadata
-dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
-# - Calculate train and val episodes
-total_episodes = dataset_metadata.total_episodes
-episodes = list(range(dataset_metadata.total_episodes))
-num_train_episodes = math.floor(total_episodes * 90 / 100)
-train_episodes = episodes[:num_train_episodes]
-val_episodes = episodes[num_train_episodes:]
-print(f"Number of episodes in full dataset: {total_episodes}")
-print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}")
-print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}")
-# - Load train and val datasets
-train_dataset = LeRobotDataset("lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps)
-val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps)
-print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}")
-print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}")
-
-# Create dataloader for evaluation.
-val_dataloader = torch.utils.data.DataLoader(
- val_dataset,
- num_workers=4,
- batch_size=64,
- shuffle=False,
- pin_memory=device != torch.device("cpu"),
- drop_last=False,
-)
-
-# Run validation loop.
-loss_cumsum = 0
-n_examples_evaluated = 0
-for batch in val_dataloader:
- batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
- output_dict = policy.forward(batch)
-
- loss_cumsum += output_dict["loss"].item()
- n_examples_evaluated += batch["index"].shape[0]
-
-# Calculate the average loss over the validation set.
-average_loss = loss_cumsum / n_examples_evaluated
-
-print(f"Average loss on validation set: {average_loss:.4f}")
diff --git a/examples/backward_compatibility/replay.py b/examples/backward_compatibility/replay.py
new file mode 100644
index 0000000000..cc3397543f
--- /dev/null
+++ b/examples/backward_compatibility/replay.py
@@ -0,0 +1,105 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Replays the actions of an episode from a dataset on a robot.
+
+Example:
+
+```shell
+python -m lerobot.replay \
+ --robot.type=so100_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.id=black \
+ --dataset.repo_id=aliberts/record-test \
+ --dataset.episode=2
+```
+"""
+
+import logging
+import time
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from pprint import pformat
+
+import draccus
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.robots import ( # noqa: F401
+ Robot,
+ RobotConfig,
+ koch_follower,
+ make_robot_from_config,
+ so100_follower,
+ so101_follower,
+)
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.utils import (
+ init_logging,
+ log_say,
+)
+
+
+@dataclass
+class DatasetReplayConfig:
+ # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).
+ repo_id: str
+ # Episode to replay.
+ episode: int
+ # Root directory where the dataset will be stored (e.g. 'dataset/path').
+ root: str | Path | None = None
+    # Limit the frames per second (defaults to 30).
+ fps: int = 30
+
+
+@dataclass
+class ReplayConfig:
+ robot: RobotConfig
+ dataset: DatasetReplayConfig
+ # Use vocal synthesis to read events.
+ play_sounds: bool = True
+
+
+@draccus.wrap()
+def replay(cfg: ReplayConfig):
+ init_logging()
+ logging.info(pformat(asdict(cfg)))
+
+ robot = make_robot_from_config(cfg.robot)
+ dataset = LeRobotDataset(cfg.dataset.repo_id, root=cfg.dataset.root, episodes=[cfg.dataset.episode])
+ actions = dataset.hf_dataset.select_columns("action")
+ robot.connect()
+
+ log_say("Replaying episode", cfg.play_sounds, blocking=True)
+ for idx in range(dataset.num_frames):
+ start_episode_t = time.perf_counter()
+
+ action_array = actions[idx]["action"]
+ action = {}
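+        # Rebuild the action dict expected by the robot: strip the legacy 'main_' prefix from
+        # each recorded motor name and add the '.pos' suffix used by the current motor naming.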
+ for i, name in enumerate(dataset.features["action"]["names"]):
+ key = f"{name.removeprefix('main_')}.pos"
+ action[key] = action_array[i].item()
+
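+        # Backward-compatibility adjustment: mirror shoulder_lift around 90 and offset elbow_flex
+        # by -90, since the older recordings appear to use different joint zero conventions.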
+ action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
+ action["elbow_flex.pos"] -= 90
+ robot.send_action(action)
+
+ dt_s = time.perf_counter() - start_episode_t
+ busy_wait(1 / dataset.fps - dt_s)
+
+ robot.disconnect()
+
+
+if __name__ == "__main__":
+ replay()
diff --git a/examples/lekiwi/evaluate.py b/examples/lekiwi/evaluate.py
new file mode 100644
index 0000000000..57fb62e108
--- /dev/null
+++ b/examples/lekiwi/evaluate.py
@@ -0,0 +1,90 @@
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.utils import hw_to_dataset_features
+from lerobot.policies.act.modeling_act import ACTPolicy
+from lerobot.record import record_loop
+from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
+from lerobot.utils.control_utils import init_keyboard_listener
+from lerobot.utils.utils import log_say
+from lerobot.utils.visualization_utils import _init_rerun
+
+NUM_EPISODES = 2
+FPS = 30
+EPISODE_TIME_SEC = 60
+TASK_DESCRIPTION = "My task description"
+
+# Create the robot and teleoperator configurations
+robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
+robot = LeKiwiClient(robot_config)
+
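+# Load the policy to evaluate. The "/" below is a placeholder: replace it with your trained
+# checkpoint path or hub repo id (e.g. '{hf_username}/{model_name}').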
+policy = ACTPolicy.from_pretrained("/")
+
+# Configure the dataset features
+action_features = hw_to_dataset_features(robot.action_features, "action")
+obs_features = hw_to_dataset_features(robot.observation_features, "observation")
+dataset_features = {**action_features, **obs_features}
+
+# Create the dataset
+dataset = LeRobotDataset.create(
+ repo_id="/",
+ fps=FPS,
+ features=dataset_features,
+ robot_type=robot.name,
+ use_videos=True,
+ image_writer_threads=4,
+)
+
+# To connect, you should already have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi`
+robot.connect()
+
+_init_rerun(session_name="recording")
+
+listener, events = init_keyboard_listener()
+
+if not robot.is_connected:
+ raise ValueError("Robot is not connected!")
+
+recorded_episodes = 0
+while recorded_episodes < NUM_EPISODES and not events["stop_recording"]:
+ log_say(f"Running inference, recording eval episode {recorded_episodes} of {NUM_EPISODES}")
+
+ # Run the policy inference loop
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=FPS,
+ policy=policy,
+ dataset=dataset,
+ control_time_s=EPISODE_TIME_SEC,
+ single_task=TASK_DESCRIPTION,
+ display_data=True,
+ )
+
+ # Logic for reset env
+ if not events["stop_recording"] and (
+ (recorded_episodes < NUM_EPISODES - 1) or events["rerecord_episode"]
+ ):
+ log_say("Reset the environment")
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=FPS,
+ control_time_s=EPISODE_TIME_SEC,
+ single_task=TASK_DESCRIPTION,
+ display_data=True,
+ )
+
+ if events["rerecord_episode"]:
+ log_say("Re-record episode")
+ events["rerecord_episode"] = False
+ events["exit_early"] = False
+ dataset.clear_episode_buffer()
+ continue
+
+ dataset.save_episode()
+ recorded_episodes += 1
+
+# Upload to hub and clean up
+dataset.push_to_hub()
+
+robot.disconnect()
+listener.stop()
diff --git a/examples/lekiwi/record.py b/examples/lekiwi/record.py
new file mode 100644
index 0000000000..11a716761c
--- /dev/null
+++ b/examples/lekiwi/record.py
@@ -0,0 +1,101 @@
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.utils import hw_to_dataset_features
+from lerobot.record import record_loop
+from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
+from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient
+from lerobot.teleoperators.keyboard import KeyboardTeleop, KeyboardTeleopConfig
+from lerobot.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig
+from lerobot.utils.control_utils import init_keyboard_listener
+from lerobot.utils.utils import log_say
+from lerobot.utils.visualization_utils import _init_rerun
+
+NUM_EPISODES = 3
+FPS = 30
+EPISODE_TIME_SEC = 30
+RESET_TIME_SEC = 10
+TASK_DESCRIPTION = "My task description"
+
+# Create the robot and teleoperator configurations
+robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
+leader_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm")
+keyboard_config = KeyboardTeleopConfig()
+
+robot = LeKiwiClient(robot_config)
+leader_arm = SO100Leader(leader_arm_config)
+keyboard = KeyboardTeleop(keyboard_config)
+
+# Configure the dataset features
+action_features = hw_to_dataset_features(robot.action_features, "action")
+obs_features = hw_to_dataset_features(robot.observation_features, "observation")
+dataset_features = {**action_features, **obs_features}
+
+# Create the dataset
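+# (The "/" repo_id below is a placeholder: replace it with your own '{hf_username}/{dataset_name}'.)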
+dataset = LeRobotDataset.create(
+ repo_id="/",
+ fps=FPS,
+ features=dataset_features,
+ robot_type=robot.name,
+ use_videos=True,
+ image_writer_threads=4,
+)
+
+# To connect, you should already have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi`
+robot.connect()
+leader_arm.connect()
+keyboard.connect()
+
+_init_rerun(session_name="lekiwi_record")
+
+listener, events = init_keyboard_listener()
+
+if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected:
+    raise ValueError("Robot, leader arm or keyboard is not connected!")
+
+recorded_episodes = 0
+while recorded_episodes < NUM_EPISODES and not events["stop_recording"]:
+ log_say(f"Recording episode {recorded_episodes}")
+
+ # Run the record loop
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=FPS,
+ dataset=dataset,
+ teleop=[leader_arm, keyboard],
+ control_time_s=EPISODE_TIME_SEC,
+ single_task=TASK_DESCRIPTION,
+ display_data=True,
+ )
+
+ # Logic for reset env
+ if not events["stop_recording"] and (
+ (recorded_episodes < NUM_EPISODES - 1) or events["rerecord_episode"]
+ ):
+ log_say("Reset the environment")
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=FPS,
+ teleop=[leader_arm, keyboard],
+ control_time_s=RESET_TIME_SEC,
+ single_task=TASK_DESCRIPTION,
+ display_data=True,
+ )
+
+ if events["rerecord_episode"]:
+ log_say("Re-record episode")
+ events["rerecord_episode"] = False
+ events["exit_early"] = False
+ dataset.clear_episode_buffer()
+ continue
+
+ dataset.save_episode()
+ recorded_episodes += 1
+
+# Upload to hub and clean up
+dataset.push_to_hub()
+
+robot.disconnect()
+leader_arm.disconnect()
+keyboard.disconnect()
+listener.stop()
diff --git a/examples/lekiwi/replay.py b/examples/lekiwi/replay.py
new file mode 100644
index 0000000000..248354df94
--- /dev/null
+++ b/examples/lekiwi/replay.py
@@ -0,0 +1,33 @@
+import time
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
+from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.utils import log_say
+
+EPISODE_IDX = 0
+
+robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
+robot = LeKiwiClient(robot_config)
+
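+# The "/" below is a placeholder repo id: replace it with the dataset you recorded
+# (e.g. '{hf_username}/{dataset_name}').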
+dataset = LeRobotDataset("/", episodes=[EPISODE_IDX])
+actions = dataset.hf_dataset.select_columns("action")
+
+robot.connect()
+
+if not robot.is_connected:
+ raise ValueError("Robot is not connected!")
+
+log_say(f"Replaying episode {EPISODE_IDX}")
+for idx in range(dataset.num_frames):
+ t0 = time.perf_counter()
+
+ action = {
+ name: float(actions[idx]["action"][i]) for i, name in enumerate(dataset.features["action"]["names"])
+ }
+ robot.send_action(action)
+
+ busy_wait(max(1.0 / dataset.fps - (time.perf_counter() - t0), 0.0))
+
+robot.disconnect()
diff --git a/examples/lekiwi/teleoperate.py b/examples/lekiwi/teleoperate.py
new file mode 100644
index 0000000000..8358a2b93e
--- /dev/null
+++ b/examples/lekiwi/teleoperate.py
@@ -0,0 +1,47 @@
+import time
+
+from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
+from lerobot.teleoperators.keyboard.teleop_keyboard import KeyboardTeleop, KeyboardTeleopConfig
+from lerobot.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.visualization_utils import _init_rerun, log_rerun_data
+
+FPS = 30
+
+# Create the robot and teleoperator configurations
+robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="my_lekiwi")
+teleop_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm")
+keyboard_config = KeyboardTeleopConfig(id="my_laptop_keyboard")
+
+robot = LeKiwiClient(robot_config)
+leader_arm = SO100Leader(teleop_arm_config)
+keyboard = KeyboardTeleop(keyboard_config)
+
+# To connect, you should already have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi`
+robot.connect()
+leader_arm.connect()
+keyboard.connect()
+
+_init_rerun(session_name="lekiwi_teleop")
+
+if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected:
+    raise ValueError("Robot, leader arm or keyboard is not connected!")
+
+while True:
+ t0 = time.perf_counter()
+
+ observation = robot.get_observation()
+
+ arm_action = leader_arm.get_action()
+ arm_action = {f"arm_{k}": v for k, v in arm_action.items()}
+
+ keyboard_keys = keyboard.get_action()
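+    # Convert the currently pressed keys into a base action for LeKiwi's mobile base.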
+ base_action = robot._from_keyboard_to_base_action(keyboard_keys)
+
+ log_rerun_data(observation, {**arm_action, **base_action})
+
+ action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
+
+ robot.send_action(action)
+
+ busy_wait(max(1.0 / FPS - (time.perf_counter() - t0), 0.0))
diff --git a/examples/port_datasets/pusht_zarr.py b/examples/port_datasets/pusht_zarr.py
deleted file mode 100644
index 60df984054..0000000000
--- a/examples/port_datasets/pusht_zarr.py
+++ /dev/null
@@ -1,222 +0,0 @@
-import shutil
-from pathlib import Path
-
-import numpy as np
-import torch
-
-from lerobot.common.datasets.lerobot_dataset import LEROBOT_HOME, LeRobotDataset
-from lerobot.common.datasets.push_dataset_to_hub._download_raw import download_raw
-
-PUSHT_TASK = "Push the T-shaped blue block onto the T-shaped green target surface."
-PUSHT_FEATURES = {
- "observation.state": {
- "dtype": "float32",
- "shape": (2,),
- "names": {
- "axes": ["x", "y"],
- },
- },
- "action": {
- "dtype": "float32",
- "shape": (2,),
- "names": {
- "axes": ["x", "y"],
- },
- },
- "next.reward": {
- "dtype": "float32",
- "shape": (1,),
- "names": None,
- },
- "next.success": {
- "dtype": "bool",
- "shape": (1,),
- "names": None,
- },
- "observation.environment_state": {
- "dtype": "float32",
- "shape": (16,),
- "names": [
- "keypoints",
- ],
- },
- "observation.image": {
- "dtype": None,
- "shape": (3, 96, 96),
- "names": [
- "channel",
- "height",
- "width",
- ],
- },
-}
-
-
-def build_features(mode: str) -> dict:
- features = PUSHT_FEATURES
- if mode == "keypoints":
- features.pop("observation.image")
- else:
- features.pop("observation.environment_state")
- features["observation.image"]["dtype"] = mode
-
- return features
-
-
-def load_raw_dataset(zarr_path: Path):
- try:
- from lerobot.common.datasets.push_dataset_to_hub._diffusion_policy_replay_buffer import (
- ReplayBuffer as DiffusionPolicyReplayBuffer,
- )
- except ModuleNotFoundError as e:
- print("`gym_pusht` is not installed. Please install it with `pip install 'lerobot[gym_pusht]'`")
- raise e
-
- zarr_data = DiffusionPolicyReplayBuffer.copy_from_path(zarr_path)
- return zarr_data
-
-
-def calculate_coverage(zarr_data):
- try:
- import pymunk
- from gym_pusht.envs.pusht import PushTEnv, pymunk_to_shapely
- except ModuleNotFoundError as e:
- print("`gym_pusht` is not installed. Please install it with `pip install 'lerobot[gym_pusht]'`")
- raise e
-
- block_pos = zarr_data["state"][:, 2:4]
- block_angle = zarr_data["state"][:, 4]
-
- num_frames = len(block_pos)
-
- coverage = np.zeros((num_frames,))
- # 8 keypoints with 2 coords each
- keypoints = np.zeros((num_frames, 16))
-
- # Set x, y, theta (in radians)
- goal_pos_angle = np.array([256, 256, np.pi / 4])
- goal_body = PushTEnv.get_goal_pose_body(goal_pos_angle)
-
- for i in range(num_frames):
- space = pymunk.Space()
- space.gravity = 0, 0
- space.damping = 0
-
- # Add walls.
- walls = [
- PushTEnv.add_segment(space, (5, 506), (5, 5), 2),
- PushTEnv.add_segment(space, (5, 5), (506, 5), 2),
- PushTEnv.add_segment(space, (506, 5), (506, 506), 2),
- PushTEnv.add_segment(space, (5, 506), (506, 506), 2),
- ]
- space.add(*walls)
-
- block_body, block_shapes = PushTEnv.add_tee(space, block_pos[i].tolist(), block_angle[i].item())
- goal_geom = pymunk_to_shapely(goal_body, block_body.shapes)
- block_geom = pymunk_to_shapely(block_body, block_body.shapes)
- intersection_area = goal_geom.intersection(block_geom).area
- goal_area = goal_geom.area
- coverage[i] = intersection_area / goal_area
- keypoints[i] = torch.from_numpy(PushTEnv.get_keypoints(block_shapes).flatten())
-
- return coverage, keypoints
-
-
-def calculate_success(coverage: float, success_threshold: float):
- return coverage > success_threshold
-
-
-def calculate_reward(coverage: float, success_threshold: float):
- return np.clip(coverage / success_threshold, 0, 1)
-
-
-def main(raw_dir: Path, repo_id: str, mode: str = "video", push_to_hub: bool = True):
- if mode not in ["video", "image", "keypoints"]:
- raise ValueError(mode)
-
- if (LEROBOT_HOME / repo_id).exists():
- shutil.rmtree(LEROBOT_HOME / repo_id)
-
- if not raw_dir.exists():
- download_raw(raw_dir, repo_id="lerobot-raw/pusht_raw")
-
- zarr_data = load_raw_dataset(zarr_path=raw_dir / "pusht_cchi_v7_replay.zarr")
-
- env_state = zarr_data["state"][:]
- agent_pos = env_state[:, :2]
-
- action = zarr_data["action"][:]
- image = zarr_data["img"] # (b, h, w, c)
-
- episode_data_index = {
- "from": np.concatenate(([0], zarr_data.meta["episode_ends"][:-1])),
- "to": zarr_data.meta["episode_ends"],
- }
-
- # Calculate success and reward based on the overlapping area
- # of the T-object and the T-area.
- coverage, keypoints = calculate_coverage(zarr_data)
- success = calculate_success(coverage, success_threshold=0.95)
- reward = calculate_reward(coverage, success_threshold=0.95)
-
- features = build_features(mode)
- dataset = LeRobotDataset.create(
- repo_id=repo_id,
- fps=10,
- robot_type="2d pointer",
- features=features,
- image_writer_threads=4,
- )
- episodes = range(len(episode_data_index["from"]))
- for ep_idx in episodes:
- from_idx = episode_data_index["from"][ep_idx]
- to_idx = episode_data_index["to"][ep_idx]
- num_frames = to_idx - from_idx
-
- for frame_idx in range(num_frames):
- i = from_idx + frame_idx
- frame = {
- "action": torch.from_numpy(action[i]),
- # Shift reward and success by +1 until the last item of the episode
- "next.reward": reward[i + (frame_idx < num_frames - 1)],
- "next.success": success[i + (frame_idx < num_frames - 1)],
- }
-
- frame["observation.state"] = torch.from_numpy(agent_pos[i])
-
- if mode == "keypoints":
- frame["observation.environment_state"] = torch.from_numpy(keypoints[i])
- else:
- frame["observation.image"] = torch.from_numpy(image[i])
-
- dataset.add_frame(frame)
-
- dataset.save_episode(task=PUSHT_TASK)
-
- dataset.consolidate()
-
- if push_to_hub:
- dataset.push_to_hub()
-
-
-if __name__ == "__main__":
-    # To try this script, modify the repo id with your own HuggingFace user (e.g. cadene/pusht)
- repo_id = "lerobot/pusht"
-
- modes = ["video", "image", "keypoints"]
- # Uncomment if you want to try with a specific mode
- # modes = ["video"]
- # modes = ["image"]
- # modes = ["keypoints"]
-
- raw_dir = Path("data/lerobot-raw/pusht_raw")
- for mode in modes:
- if mode in ["image", "keypoints"]:
- repo_id += f"_{mode}"
-
- # download and load raw dataset, create LeRobotDataset, populate it, push to hub
- main(raw_dir, repo_id=repo_id, mode=mode)
-
- # Uncomment if you want to load the local dataset and explore it
- # dataset = LeRobotDataset(repo_id=repo_id, local_files_only=True)
- # breakpoint()
diff --git a/lerobot/__init__.py b/lerobot/__init__.py
deleted file mode 100644
index 3d5bb6aaa6..0000000000
--- a/lerobot/__init__.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-This file contains lists of available environments, datasets and policies to reflect the current state of the LeRobot library.
-We do not want to import all the dependencies, but instead we keep it lightweight to ensure fast access to these variables.
-
-Example:
- ```python
- import lerobot
- print(lerobot.available_envs)
- print(lerobot.available_tasks_per_env)
- print(lerobot.available_datasets)
- print(lerobot.available_datasets_per_env)
- print(lerobot.available_real_world_datasets)
- print(lerobot.available_policies)
- print(lerobot.available_policies_per_env)
- print(lerobot.available_robots)
- print(lerobot.available_cameras)
- print(lerobot.available_motors)
- ```
-
-When implementing a new dataset loadable with LeRobotDataset follow these steps:
-- Update `available_datasets_per_env` in `lerobot/__init__.py`
-
-When implementing a new environment (e.g. `gym_aloha`), follow these steps:
-- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
-
-When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps:
-- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py`
-- Set the required `name` class attribute.
-- Update variables in `tests/test_available.py` by importing your new Policy class
-"""
-
-import itertools
-
-from lerobot.__version__ import __version__ # noqa: F401
-
-# TODO(rcadene): Improve policies and envs. As of now, an item in `available_policies`
-# refers to a yaml file AND a modeling name. Same for `available_envs` which refers to
-# a yaml file AND an environment name. The difference should be more obvious.
-available_tasks_per_env = {
- "aloha": [
- "AlohaInsertion-v0",
- "AlohaTransferCube-v0",
- ],
- "pusht": ["PushT-v0"],
- "xarm": ["XarmLift-v0"],
- "dora_aloha_real": ["DoraAloha-v0", "DoraKoch-v0", "DoraReachy2-v0"],
-}
-available_envs = list(available_tasks_per_env.keys())
-
-available_datasets_per_env = {
- "aloha": [
- "lerobot/aloha_sim_insertion_human",
- "lerobot/aloha_sim_insertion_scripted",
- "lerobot/aloha_sim_transfer_cube_human",
- "lerobot/aloha_sim_transfer_cube_scripted",
- "lerobot/aloha_sim_insertion_human_image",
- "lerobot/aloha_sim_insertion_scripted_image",
- "lerobot/aloha_sim_transfer_cube_human_image",
- "lerobot/aloha_sim_transfer_cube_scripted_image",
- ],
- # TODO(alexander-soare): Add "lerobot/pusht_keypoints". Right now we can't because this is too tightly
- # coupled with tests.
- "pusht": ["lerobot/pusht", "lerobot/pusht_image"],
- "xarm": [
- "lerobot/xarm_lift_medium",
- "lerobot/xarm_lift_medium_replay",
- "lerobot/xarm_push_medium",
- "lerobot/xarm_push_medium_replay",
- "lerobot/xarm_lift_medium_image",
- "lerobot/xarm_lift_medium_replay_image",
- "lerobot/xarm_push_medium_image",
- "lerobot/xarm_push_medium_replay_image",
- ],
- "dora_aloha_real": [
- "lerobot/aloha_static_battery",
- "lerobot/aloha_static_candy",
- "lerobot/aloha_static_coffee",
- "lerobot/aloha_static_coffee_new",
- "lerobot/aloha_static_cups_open",
- "lerobot/aloha_static_fork_pick_up",
- "lerobot/aloha_static_pingpong_test",
- "lerobot/aloha_static_pro_pencil",
- "lerobot/aloha_static_screw_driver",
- "lerobot/aloha_static_tape",
- "lerobot/aloha_static_thread_velcro",
- "lerobot/aloha_static_towel",
- "lerobot/aloha_static_vinh_cup",
- "lerobot/aloha_static_vinh_cup_left",
- "lerobot/aloha_static_ziploc_slide",
- ],
-}
-
-available_real_world_datasets = [
- "lerobot/aloha_mobile_cabinet",
- "lerobot/aloha_mobile_chair",
- "lerobot/aloha_mobile_elevator",
- "lerobot/aloha_mobile_shrimp",
- "lerobot/aloha_mobile_wash_pan",
- "lerobot/aloha_mobile_wipe_wine",
- "lerobot/aloha_static_battery",
- "lerobot/aloha_static_candy",
- "lerobot/aloha_static_coffee",
- "lerobot/aloha_static_coffee_new",
- "lerobot/aloha_static_cups_open",
- "lerobot/aloha_static_fork_pick_up",
- "lerobot/aloha_static_pingpong_test",
- "lerobot/aloha_static_pro_pencil",
- "lerobot/aloha_static_screw_driver",
- "lerobot/aloha_static_tape",
- "lerobot/aloha_static_thread_velcro",
- "lerobot/aloha_static_towel",
- "lerobot/aloha_static_vinh_cup",
- "lerobot/aloha_static_vinh_cup_left",
- "lerobot/aloha_static_ziploc_slide",
- "lerobot/umi_cup_in_the_wild",
- "lerobot/unitreeh1_fold_clothes",
- "lerobot/unitreeh1_rearrange_objects",
- "lerobot/unitreeh1_two_robot_greeting",
- "lerobot/unitreeh1_warehouse",
- "lerobot/nyu_rot_dataset",
- "lerobot/utokyo_saytap",
- "lerobot/imperialcollege_sawyer_wrist_cam",
- "lerobot/utokyo_xarm_bimanual",
- "lerobot/tokyo_u_lsmo",
- "lerobot/utokyo_pr2_opening_fridge",
- "lerobot/cmu_franka_exploration_dataset",
- "lerobot/cmu_stretch",
- "lerobot/asu_table_top",
- "lerobot/utokyo_pr2_tabletop_manipulation",
- "lerobot/utokyo_xarm_pick_and_place",
- "lerobot/ucsd_kitchen_dataset",
- "lerobot/austin_buds_dataset",
- "lerobot/dlr_sara_grid_clamp",
- "lerobot/conq_hose_manipulation",
- "lerobot/columbia_cairlab_pusht_real",
- "lerobot/dlr_sara_pour",
- "lerobot/dlr_edan_shared_control",
- "lerobot/ucsd_pick_and_place_dataset",
- "lerobot/berkeley_cable_routing",
- "lerobot/nyu_franka_play_dataset",
- "lerobot/austin_sirius_dataset",
- "lerobot/cmu_play_fusion",
- "lerobot/berkeley_gnm_sac_son",
- "lerobot/nyu_door_opening_surprising_effectiveness",
- "lerobot/berkeley_fanuc_manipulation",
- "lerobot/jaco_play",
- "lerobot/viola",
- "lerobot/kaist_nonprehensile",
- "lerobot/berkeley_mvp",
- "lerobot/uiuc_d3field",
- "lerobot/berkeley_gnm_recon",
- "lerobot/austin_sailor_dataset",
- "lerobot/utaustin_mutex",
- "lerobot/roboturk",
- "lerobot/stanford_hydra_dataset",
- "lerobot/berkeley_autolab_ur5",
- "lerobot/stanford_robocook",
- "lerobot/toto",
- "lerobot/fmb",
- "lerobot/droid_100",
- "lerobot/berkeley_rpt",
- "lerobot/stanford_kuka_multimodal_dataset",
- "lerobot/iamlab_cmu_pickup_insert",
- "lerobot/taco_play",
- "lerobot/berkeley_gnm_cory_hall",
- "lerobot/usc_cloth_sim",
-]
-
-available_datasets = sorted(
- set(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets))
-)
-
-# lists all available policies from `lerobot/common/policies`
-available_policies = [
- "act",
- "diffusion",
- "tdmpc",
- "vqbet",
-]
-
-# lists all available robots from `lerobot/common/robot_devices/robots`
-available_robots = [
- "koch",
- "koch_bimanual",
- "aloha",
- "so100",
- "moss",
-]
-
-# lists all available cameras from `lerobot/common/robot_devices/cameras`
-available_cameras = [
- "opencv",
- "intelrealsense",
-]
-
-# lists all available motors from `lerobot/common/robot_devices/motors`
-available_motors = [
- "dynamixel",
- "feetech",
-]
-
-# keys and values refer to yaml files
-available_policies_per_env = {
- "aloha": ["act"],
- "pusht": ["diffusion", "vqbet"],
- "xarm": ["tdmpc"],
- "koch_real": ["act_koch_real"],
- "aloha_real": ["act_aloha_real"],
- "dora_aloha_real": ["act_aloha_real"],
-}
-
-env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
-env_dataset_pairs = [
- (env, dataset) for env, datasets in available_datasets_per_env.items() for dataset in datasets
-]
-env_dataset_policy_triplets = [
- (env, dataset, policy)
- for env, datasets in available_datasets_per_env.items()
- for dataset in datasets
- for policy in available_policies_per_env[env]
-]
diff --git a/lerobot/common/datasets/compute_stats.py b/lerobot/common/datasets/compute_stats.py
deleted file mode 100644
index c621169944..0000000000
--- a/lerobot/common/datasets/compute_stats.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from copy import deepcopy
-from math import ceil
-
-import einops
-import torch
-import tqdm
-
-
-def get_stats_einops_patterns(dataset, num_workers=0):
- """These einops patterns will be used to aggregate batches and compute statistics.
-
- Note: We assume the images are in channel first format
- """
-
- dataloader = torch.utils.data.DataLoader(
- dataset,
- num_workers=num_workers,
- batch_size=2,
- shuffle=False,
- )
- batch = next(iter(dataloader))
-
- stats_patterns = {}
-
- for key in dataset.features:
- # sanity check that tensors are not float64
- assert batch[key].dtype != torch.float64
-
- # if isinstance(feats_type, (VideoFrame, Image)):
- if key in dataset.meta.camera_keys:
- # sanity check that images are channel first
- _, c, h, w = batch[key].shape
- assert c < h and c < w, f"expect channel first images, but instead {batch[key].shape}"
-
- # sanity check that images are float32 in range [0,1]
- assert batch[key].dtype == torch.float32, f"expect torch.float32, but instead {batch[key].dtype=}"
-            assert batch[key].max() <= 1, f"expect pixels lower than or equal to 1, but instead {batch[key].max()=}"
-            assert batch[key].min() >= 0, f"expect pixels greater than or equal to 0, but instead {batch[key].min()=}"
-
- stats_patterns[key] = "b c h w -> c 1 1"
- elif batch[key].ndim == 2:
- stats_patterns[key] = "b c -> c "
- elif batch[key].ndim == 1:
- stats_patterns[key] = "b -> 1"
- else:
- raise ValueError(f"{key}, {batch[key].shape}")
-
- return stats_patterns
-
-
-def compute_stats(dataset, batch_size=8, num_workers=8, max_num_samples=None):
- """Compute mean/std and min/max statistics of all data keys in a LeRobotDataset."""
- if max_num_samples is None:
- max_num_samples = len(dataset)
-
- # for more info on why we need to set the same number of workers, see `load_from_videos`
- stats_patterns = get_stats_einops_patterns(dataset, num_workers)
-
- # mean and std will be computed incrementally while max and min will track the running value.
- mean, std, max, min = {}, {}, {}, {}
- for key in stats_patterns:
- mean[key] = torch.tensor(0.0).float()
- std[key] = torch.tensor(0.0).float()
- max[key] = torch.tensor(-float("inf")).float()
- min[key] = torch.tensor(float("inf")).float()
-
- def create_seeded_dataloader(dataset, batch_size, seed):
- generator = torch.Generator()
- generator.manual_seed(seed)
- dataloader = torch.utils.data.DataLoader(
- dataset,
- num_workers=num_workers,
- batch_size=batch_size,
- shuffle=True,
- drop_last=False,
- generator=generator,
- )
- return dataloader
-
- # Note: Due to be refactored soon. The point of storing `first_batch` is to make sure we don't get
- # surprises when rerunning the sampler.
- first_batch = None
- running_item_count = 0 # for online mean computation
- dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337)
- for i, batch in enumerate(
- tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute mean, min, max")
- ):
- this_batch_size = len(batch["index"])
- running_item_count += this_batch_size
- if first_batch is None:
- first_batch = deepcopy(batch)
- for key, pattern in stats_patterns.items():
- batch[key] = batch[key].float()
- # Numerically stable update step for mean computation.
- batch_mean = einops.reduce(batch[key], pattern, "mean")
- # Hint: to update the mean we need x̄ₙ = (Nₙ₋₁x̄ₙ₋₁ + Bₙxₙ) / Nₙ, where the subscript represents
- # the update step, N is the running item count, B is this batch size, x̄ is the running mean,
- # and x is the current batch mean. Some rearrangement is then required to avoid risking
- # numerical overflow. Another hint: Nₙ₋₁ = Nₙ - Bₙ. Rearrangement yields
- # x̄ₙ = x̄ₙ₋₁ + Bₙ * (xₙ - x̄ₙ₋₁) / Nₙ
- mean[key] = mean[key] + this_batch_size * (batch_mean - mean[key]) / running_item_count
- max[key] = torch.maximum(max[key], einops.reduce(batch[key], pattern, "max"))
- min[key] = torch.minimum(min[key], einops.reduce(batch[key], pattern, "min"))
-
- if i == ceil(max_num_samples / batch_size) - 1:
- break
-
- first_batch_ = None
- running_item_count = 0 # for online std computation
- dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337)
- for i, batch in enumerate(
- tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute std")
- ):
- this_batch_size = len(batch["index"])
- running_item_count += this_batch_size
- # Sanity check to make sure the batches are still in the same order as before.
- if first_batch_ is None:
- first_batch_ = deepcopy(batch)
- for key in stats_patterns:
- assert torch.equal(first_batch_[key], first_batch[key])
- for key, pattern in stats_patterns.items():
- batch[key] = batch[key].float()
- # Numerically stable update step for mean computation (where the mean is over squared
- # residuals). See notes in the mean computation loop above.
- batch_std = einops.reduce((batch[key] - mean[key]) ** 2, pattern, "mean")
- std[key] = std[key] + this_batch_size * (batch_std - std[key]) / running_item_count
-
- if i == ceil(max_num_samples / batch_size) - 1:
- break
-
- for key in stats_patterns:
- std[key] = torch.sqrt(std[key])
-
- stats = {}
- for key in stats_patterns:
- stats[key] = {
- "mean": mean[key],
- "std": std[key],
- "max": max[key],
- "min": min[key],
- }
- return stats
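-
-# Editor's sketch (illustrative usage, not part of the original code): assuming `dataset` is a
-# LeRobotDataset that exposes an "observation.state" key (hypothetical name), the statistics could
-# be computed and inspected like so:
-#
-# stats = compute_stats(dataset, batch_size=32, num_workers=4)
-# print(stats["observation.state"]["mean"], stats["observation.state"]["std"])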
-
-
-def aggregate_stats(ls_datasets) -> dict[str, torch.Tensor]:
- """Aggregate stats of multiple LeRobot datasets into one set of stats without recomputing from scratch.
-
- The final stats will have the union of all data keys from each of the datasets. For instance:
- - new_max = max(max_dataset_0, max_dataset_1, ...)
- - new_min = min(min_dataset_0, min_dataset_1, ...)
- - new_mean = (mean of all data)
- - new_std = (std of all data)
- """
- data_keys = set()
- for dataset in ls_datasets:
- data_keys.update(dataset.meta.stats.keys())
- stats = {k: {} for k in data_keys}
- for data_key in data_keys:
- for stat_key in ["min", "max"]:
- # compute `max(dataset_0["max"], dataset_1["max"], ...)`
- stats[data_key][stat_key] = einops.reduce(
- torch.stack(
- [ds.meta.stats[data_key][stat_key] for ds in ls_datasets if data_key in ds.meta.stats],
- dim=0,
- ),
- "n ... -> ...",
- stat_key,
- )
- total_samples = sum(d.num_frames for d in ls_datasets if data_key in d.meta.stats)
- # Compute the "sum" statistic by multiplying each mean by the number of samples in the respective
- # dataset, then divide by total_samples to get the overall "mean".
- # NOTE: the brackets around (d.num_frames / total_samples) are needed to minimize the risk of
- # numerical overflow!
- stats[data_key]["mean"] = sum(
- d.meta.stats[data_key]["mean"] * (d.num_frames / total_samples)
- for d in ls_datasets
- if data_key in d.meta.stats
- )
- # The derivation for standard deviation is a little more involved but is much in the same spirit as
- # the computation of the mean.
- # Given two sets of data where the statistics are known:
- # σ_combined = sqrt[ (n1 * (σ1^2 + d1^2) + n2 * (σ2^2 + d2^2)) / (n1 + n2) ]
- # where d1 = μ1 - μ_combined, d2 = μ2 - μ_combined
- # NOTE: the brackets around (d.num_frames / total_samples) are needed to minimize the risk of
- # numerical overflow!
- stats[data_key]["std"] = torch.sqrt(
- sum(
- (
- d.meta.stats[data_key]["std"] ** 2
- + (d.meta.stats[data_key]["mean"] - stats[data_key]["mean"]) ** 2
- )
- * (d.num_frames / total_samples)
- for d in ls_datasets
- if data_key in d.meta.stats
- )
- )
- return stats
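-
-# Editor's note (worked check of the combined-std formula above, hypothetical values): two datasets
-# with n1=2, mean1=0, std1=1 and n2=2, mean2=2, std2=1 pool into the data {-1, 1, 1, 3}, whose mean
-# is 1 and std is sqrt(2). The formula agrees: with d1 = 0 - 1 = -1 and d2 = 2 - 1 = 1,
-# sqrt((2 * (1 + 1) + 2 * (1 + 1)) / 4) = sqrt(2).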
diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py
deleted file mode 100644
index f6164ed1dc..0000000000
--- a/lerobot/common/datasets/factory.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-
-import torch
-from omegaconf import ListConfig, OmegaConf
-
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, MultiLeRobotDataset
-from lerobot.common.datasets.transforms import get_image_transforms
-
-
-def resolve_delta_timestamps(cfg):
- """Resolves delta_timestamps config key (in-place) by using `eval`.
-
- Doesn't do anything if delta_timestamps is not specified or has already been resolved (as evidenced by
- the data type of its values).
- """
- delta_timestamps = cfg.training.get("delta_timestamps")
- if delta_timestamps is not None:
- for key in delta_timestamps:
- if isinstance(delta_timestamps[key], str):
- # TODO(rcadene, alexander-soare): remove `eval` to avoid exploit
- cfg.training.delta_timestamps[key] = eval(delta_timestamps[key])
-
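-# Editor's sketch (hypothetical config values): a string entry such as
-# delta_timestamps: {"action": "[i / 10 for i in range(5)]"}
-# is resolved in place by `eval` into
-# delta_timestamps: {"action": [0.0, 0.1, 0.2, 0.3, 0.4]}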
-
-def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotDataset:
- """
- Args:
- cfg: A Hydra config as per the LeRobot config scheme.
- split: Select the data subset used to create an instance of LeRobotDataset.
- All datasets hosted on [lerobot](https://huggingface.co/lerobot) contain only one subset: "train".
- Thus, by default, `split="train"` selects all the available data. `split` aims to work like the
- slicer in the Hugging Face datasets library:
- https://huggingface.co/docs/datasets/v2.19.0/loading#slice-splits
- As of now, it only supports `split="train[:n]"` to load the first n frames of the dataset or
- `split="train[n:]"` to load the last n frames. For instance `split="train[:1000]"`.
- Returns:
- The LeRobotDataset.
- """
- if not isinstance(cfg.dataset_repo_id, (str, ListConfig)):
- raise ValueError(
- "Expected cfg.dataset_repo_id to be either a single string to load one dataset or a list of "
- "strings to load multiple datasets."
- )
-
- # A soft check to warn if the environment does not match the dataset. Skip the check when using a real-world env (dora).
- if cfg.env.name != "dora":
- if isinstance(cfg.dataset_repo_id, str):
- dataset_repo_ids = [cfg.dataset_repo_id] # single dataset
- else:
- dataset_repo_ids = cfg.dataset_repo_id # multiple datasets
-
- for dataset_repo_id in dataset_repo_ids:
- if cfg.env.name not in dataset_repo_id:
- logging.warning(
- f"There might be a mismatch between your training dataset ({dataset_repo_id=}) and your "
- f"environment ({cfg.env.name=})."
- )
-
- resolve_delta_timestamps(cfg)
-
- image_transforms = None
- if cfg.training.image_transforms.enable:
- cfg_tf = cfg.training.image_transforms
- image_transforms = get_image_transforms(
- brightness_weight=cfg_tf.brightness.weight,
- brightness_min_max=cfg_tf.brightness.min_max,
- contrast_weight=cfg_tf.contrast.weight,
- contrast_min_max=cfg_tf.contrast.min_max,
- saturation_weight=cfg_tf.saturation.weight,
- saturation_min_max=cfg_tf.saturation.min_max,
- hue_weight=cfg_tf.hue.weight,
- hue_min_max=cfg_tf.hue.min_max,
- sharpness_weight=cfg_tf.sharpness.weight,
- sharpness_min_max=cfg_tf.sharpness.min_max,
- max_num_transforms=cfg_tf.max_num_transforms,
- random_order=cfg_tf.random_order,
- )
-
- if isinstance(cfg.dataset_repo_id, str):
- # TODO (aliberts): add 'episodes' arg from config after removing hydra
- dataset = LeRobotDataset(
- cfg.dataset_repo_id,
- delta_timestamps=cfg.training.get("delta_timestamps"),
- image_transforms=image_transforms,
- video_backend=cfg.video_backend,
- )
- else:
- dataset = MultiLeRobotDataset(
- cfg.dataset_repo_id,
- delta_timestamps=cfg.training.get("delta_timestamps"),
- image_transforms=image_transforms,
- video_backend=cfg.video_backend,
- )
-
- if cfg.get("override_dataset_stats"):
- for key, stats_dict in cfg.override_dataset_stats.items():
- for stats_type, listconfig in stats_dict.items():
- # example of stats_type: min, max, mean, std
- stats = OmegaConf.to_container(listconfig, resolve=True)
- dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32)
-
- return dataset
diff --git a/lerobot/common/datasets/lerobot_dataset.py b/lerobot/common/datasets/lerobot_dataset.py
deleted file mode 100644
index 27632f0e12..0000000000
--- a/lerobot/common/datasets/lerobot_dataset.py
+++ /dev/null
@@ -1,1150 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-import os
-import shutil
-from functools import cached_property
-from pathlib import Path
-from typing import Callable
-
-import datasets
-import numpy as np
-import PIL.Image
-import torch
-import torch.utils
-from datasets import load_dataset
-from huggingface_hub import create_repo, snapshot_download, upload_folder
-
-from lerobot.common.datasets.compute_stats import aggregate_stats, compute_stats
-from lerobot.common.datasets.image_writer import AsyncImageWriter, write_image
-from lerobot.common.datasets.utils import (
- DEFAULT_FEATURES,
- DEFAULT_IMAGE_PATH,
- EPISODES_PATH,
- INFO_PATH,
- STATS_PATH,
- TASKS_PATH,
- append_jsonlines,
- check_delta_timestamps,
- check_timestamps_sync,
- check_version_compatibility,
- create_branch,
- create_empty_dataset_info,
- create_lerobot_dataset_card,
- get_delta_indices,
- get_episode_data_index,
- get_features_from_robot,
- get_hf_features_from_features,
- get_hub_safe_version,
- hf_transform_to_torch,
- load_episodes,
- load_info,
- load_stats,
- load_tasks,
- serialize_dict,
- write_json,
- write_parquet,
-)
-from lerobot.common.datasets.video_utils import (
- VideoFrame,
- decode_video_frames_torchvision,
- encode_video_frames,
- get_video_info,
-)
-from lerobot.common.robot_devices.robots.utils import Robot
-
-# For maintainers, see lerobot/common/datasets/push_dataset_to_hub/CODEBASE_VERSION.md
-CODEBASE_VERSION = "v2.0"
-LEROBOT_HOME = Path(os.getenv("LEROBOT_HOME", "~/.cache/huggingface/lerobot")).expanduser()
-
-
-class LeRobotDatasetMetadata:
- def __init__(
- self,
- repo_id: str,
- root: str | Path | None = None,
- local_files_only: bool = False,
- ):
- self.repo_id = repo_id
- self.root = Path(root) if root is not None else LEROBOT_HOME / repo_id
- self.local_files_only = local_files_only
-
- # Load metadata
- (self.root / "meta").mkdir(exist_ok=True, parents=True)
- self.pull_from_repo(allow_patterns="meta/")
- self.info = load_info(self.root)
- self.stats = load_stats(self.root)
- self.tasks = load_tasks(self.root)
- self.episodes = load_episodes(self.root)
-
- def pull_from_repo(
- self,
- allow_patterns: list[str] | str | None = None,
- ignore_patterns: list[str] | str | None = None,
- ) -> None:
- snapshot_download(
- self.repo_id,
- repo_type="dataset",
- revision=self._hub_version,
- local_dir=self.root,
- allow_patterns=allow_patterns,
- ignore_patterns=ignore_patterns,
- local_files_only=self.local_files_only,
- )
-
- @cached_property
- def _hub_version(self) -> str | None:
- return None if self.local_files_only else get_hub_safe_version(self.repo_id, CODEBASE_VERSION)
-
- @property
- def _version(self) -> str:
- """Codebase version used to create this dataset."""
- return self.info["codebase_version"]
-
- def get_data_file_path(self, ep_index: int) -> Path:
- ep_chunk = self.get_episode_chunk(ep_index)
- fpath = self.data_path.format(episode_chunk=ep_chunk, episode_index=ep_index)
- return Path(fpath)
-
- def get_video_file_path(self, ep_index: int, vid_key: str) -> Path:
- ep_chunk = self.get_episode_chunk(ep_index)
- fpath = self.video_path.format(episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_index)
- return Path(fpath)
-
- def get_episode_chunk(self, ep_index: int) -> int:
- return ep_index // self.chunks_size
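-
- # Editor's note (illustrative, based on the file layout documented further below): with
- # chunks_size=1000 and a data_path template like
- # "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet", episode 1002 lives in
- # chunk 1, i.e. "data/chunk-001/episode_001002.parquet".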
-
- @property
- def data_path(self) -> str:
- """Formattable string for the parquet files."""
- return self.info["data_path"]
-
- @property
- def video_path(self) -> str | None:
- """Formattable string for the video files."""
- return self.info["video_path"]
-
- @property
- def robot_type(self) -> str | None:
- """Robot type used in recording this dataset."""
- return self.info["robot_type"]
-
- @property
- def fps(self) -> int:
- """Frames per second used during data collection."""
- return self.info["fps"]
-
- @property
- def features(self) -> dict[str, dict]:
- """All features contained in the dataset."""
- return self.info["features"]
-
- @property
- def image_keys(self) -> list[str]:
- """Keys to access visual modalities stored as images."""
- return [key for key, ft in self.features.items() if ft["dtype"] == "image"]
-
- @property
- def video_keys(self) -> list[str]:
- """Keys to access visual modalities stored as videos."""
- return [key for key, ft in self.features.items() if ft["dtype"] == "video"]
-
- @property
- def camera_keys(self) -> list[str]:
- """Keys to access visual modalities (regardless of their storage method)."""
- return [key for key, ft in self.features.items() if ft["dtype"] in ["video", "image"]]
-
- @property
- def names(self) -> dict[str, list | dict]:
- """Names of the various dimensions of vector modalities."""
- return {key: ft["names"] for key, ft in self.features.items()}
-
- @property
- def shapes(self) -> dict:
- """Shapes for the different features."""
- return {key: tuple(ft["shape"]) for key, ft in self.features.items()}
-
- @property
- def total_episodes(self) -> int:
- """Total number of episodes available."""
- return self.info["total_episodes"]
-
- @property
- def total_frames(self) -> int:
- """Total number of frames saved in this dataset."""
- return self.info["total_frames"]
-
- @property
- def total_tasks(self) -> int:
- """Total number of different tasks performed in this dataset."""
- return self.info["total_tasks"]
-
- @property
- def total_chunks(self) -> int:
- """Total number of chunks (groups of episodes)."""
- return self.info["total_chunks"]
-
- @property
- def chunks_size(self) -> int:
- """Max number of episodes per chunk."""
- return self.info["chunks_size"]
-
- @property
- def task_to_task_index(self) -> dict:
- return {task: task_idx for task_idx, task in self.tasks.items()}
-
- def get_task_index(self, task: str) -> int:
- """
- Given a task in natural language, returns its task_index if the task already exists in the dataset,
- otherwise returns the next available task_index (i.e. the current total number of tasks).
- """
- task_index = self.task_to_task_index.get(task, None)
- return task_index if task_index is not None else self.total_tasks
-
- def save_episode(self, episode_index: int, episode_length: int, task: str, task_index: int) -> None:
- self.info["total_episodes"] += 1
- self.info["total_frames"] += episode_length
-
- if task_index not in self.tasks:
- self.info["total_tasks"] += 1
- self.tasks[task_index] = task
- task_dict = {
- "task_index": task_index,
- "task": task,
- }
- append_jsonlines(task_dict, self.root / TASKS_PATH)
-
- chunk = self.get_episode_chunk(episode_index)
- if chunk >= self.total_chunks:
- self.info["total_chunks"] += 1
-
- self.info["splits"] = {"train": f"0:{self.info['total_episodes']}"}
- self.info["total_videos"] += len(self.video_keys)
- write_json(self.info, self.root / INFO_PATH)
-
- episode_dict = {
- "episode_index": episode_index,
- "tasks": [task],
- "length": episode_length,
- }
- self.episodes.append(episode_dict)
- append_jsonlines(episode_dict, self.root / EPISODES_PATH)
-
- # TODO(aliberts): refactor stats in save_episodes
- # image_sampling = int(self.fps / 2) # sample 2 img/s for the stats
- # ep_stats = compute_episode_stats(episode_buffer, self.features, episode_length, image_sampling=image_sampling)
- # ep_stats = serialize_dict(ep_stats)
- # append_jsonlines(ep_stats, self.root / STATS_PATH)
-
- def write_video_info(self) -> None:
- """
- Warning: this function writes info from the first episode's videos, implicitly assuming that all videos have
- been encoded the same way. Also, this means it assumes the first episode exists.
- """
- for key in self.video_keys:
- if not self.features[key].get("info", None):
- video_path = self.root / self.get_video_file_path(ep_index=0, vid_key=key)
- self.info["features"][key]["info"] = get_video_info(video_path)
-
- write_json(self.info, self.root / INFO_PATH)
-
- def __repr__(self):
- feature_keys = list(self.features)
- return (
- f"{self.__class__.__name__}({{\n"
- f" Repository ID: '{self.repo_id}',\n"
- f" Total episodes: '{self.total_episodes}',\n"
- f" Total frames: '{self.total_frames}',\n"
- f" Features: '{feature_keys}',\n"
- "})',\n"
- )
-
- @classmethod
- def create(
- cls,
- repo_id: str,
- fps: int,
- root: str | Path | None = None,
- robot: Robot | None = None,
- robot_type: str | None = None,
- features: dict | None = None,
- use_videos: bool = True,
- ) -> "LeRobotDatasetMetadata":
- """Creates metadata for a LeRobotDataset."""
- obj = cls.__new__(cls)
- obj.repo_id = repo_id
- obj.root = Path(root) if root is not None else LEROBOT_HOME / repo_id
-
- obj.root.mkdir(parents=True, exist_ok=False)
-
- if robot is not None:
- features = get_features_from_robot(robot, use_videos)
- robot_type = robot.robot_type
- if not all(cam.fps == fps for cam in robot.cameras.values()):
- logging.warning(
- f"Some cameras in your {robot.robot_type} robot don't have an fps matching the fps of your dataset."
- "In this case, frames from lower fps cameras will be repeated to fill in the blanks."
- )
- elif features is None:
- raise ValueError(
- "Dataset features must either come from a Robot or explicitly passed upon creation."
- )
- else:
- # TODO(aliberts, rcadene): implement sanity check for features
-
- # check if none of the features contains a "/" in their names,
- # as this would break the dict flattening in the stats computation, which uses '/' as separator
- for key in features:
- if "/" in key:
- raise ValueError(f"Feature names should not contain '/'. Found '/' in feature '{key}'.")
-
- features = {**features, **DEFAULT_FEATURES}
-
- obj.tasks, obj.stats, obj.episodes = {}, {}, []
- obj.info = create_empty_dataset_info(CODEBASE_VERSION, fps, robot_type, features, use_videos)
- if len(obj.video_keys) > 0 and not use_videos:
- raise ValueError()
- write_json(obj.info, obj.root / INFO_PATH)
- obj.local_files_only = True
- return obj
-
-
-class LeRobotDataset(torch.utils.data.Dataset):
- def __init__(
- self,
- repo_id: str,
- root: str | Path | None = None,
- episodes: list[int] | None = None,
- image_transforms: Callable | None = None,
- delta_timestamps: dict[list[float]] | None = None,
- tolerance_s: float = 1e-4,
- download_videos: bool = True,
- local_files_only: bool = False,
- video_backend: str | None = None,
- ):
- """
- 2 modes are available for instantiating this class, depending on 2 different use cases:
-
- 1. Your dataset already exists:
- - On your local disk in the 'root' folder. This is typically the case when you recorded your
- dataset locally and you may or may not have pushed it to the hub yet. Instantiating this class
- with 'root' will load your dataset directly from disk. This can happen while you're offline (no
- internet connection); in that case, use local_files_only=True.
-
- - On the Hugging Face Hub at the address https://huggingface.co/datasets/{repo_id} and not on
- your local disk in the 'root' folder. Instantiating this class with this 'repo_id' will download
- the dataset from that address and load it, provided your dataset is compliant with
- codebase_version v2.0. If your dataset has been created before this new format, you will be
- prompted to convert it using our conversion script from v1.6 to v2.0, which you can find at
- lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py.
-
-
- 2. Your dataset doesn't already exist (either on local disk or on the Hub): you can create an empty
- LeRobotDataset with the 'create' classmethod. This can be used for recording a dataset or for porting an
- existing dataset to the LeRobotDataset format.
-
-
- In terms of files, LeRobotDataset encapsulates 3 main things:
- - metadata:
- - info contains various information about the dataset like shapes, keys, fps etc.
- - stats stores the dataset statistics of the different modalities for normalization
- - tasks contains the prompts for each task of the dataset, which can be used for
- task-conditioned training.
- - hf_dataset (from datasets.Dataset), which will read any values from parquet files.
- - videos (optional) from which frames are loaded to be synchronous with data from parquet files.
-
- A typical LeRobotDataset looks like this from its root path:
- .
- ├── data
- │ ├── chunk-000
- │ │ ├── episode_000000.parquet
- │ │ ├── episode_000001.parquet
- │ │ ├── episode_000002.parquet
- │ │ └── ...
- │ ├── chunk-001
- │ │ ├── episode_001000.parquet
- │ │ ├── episode_001001.parquet
- │ │ ├── episode_001002.parquet
- │ │ └── ...
- │ └── ...
- ├── meta
- │ ├── episodes.jsonl
- │ ├── info.json
- │ ├── stats.json
- │ └── tasks.jsonl
- └── videos
- ├── chunk-000
- │ ├── observation.images.laptop
- │ │ ├── episode_000000.mp4
- │ │ ├── episode_000001.mp4
- │ │ ├── episode_000002.mp4
- │ │ └── ...
- │ ├── observation.images.phone
- │ │ ├── episode_000000.mp4
- │ │ ├── episode_000001.mp4
- │ │ ├── episode_000002.mp4
- │ │ └── ...
- ├── chunk-001
- └── ...
-
- Note that this file-based structure is designed to be as versatile as possible. The files are split by
- episodes, which allows more granular control over which episodes one wants to use and download. The
- structure of the dataset is entirely described in the info.json file, which can be easily downloaded
- or viewed directly on the hub before downloading any actual data. The file types used are simple and
- do not need complex tools to be read: only .parquet, .json and .mp4 files are used (and .md for the
- README).
-
- Args:
- repo_id (str): This is the repo id that will be used to fetch the dataset. Locally, the dataset
- will be stored under root/repo_id.
- root (Path | None, optional): Local directory to use for downloading/writing files. You can also
- set the LEROBOT_HOME environment variable to point to a different location. Defaults to
- '~/.cache/huggingface/lerobot'.
- episodes (list[int] | None, optional): If specified, this will only load episodes specified by
- their episode_index in this list. Defaults to None.
- image_transforms (Callable | None, optional): You can pass standard v2 image transforms from
- torchvision.transforms.v2 here which will be applied to visual modalities (whether they come
- from videos or images). Defaults to None.
- delta_timestamps (dict[list[float]] | None, optional): Dictionary mapping data keys to lists of
- relative timestamps (in seconds, expected to be multiples of 1/fps) at which additional past or
- future frames should be loaded along with the current frame. Defaults to None.
- tolerance_s (float, optional): Tolerance in seconds used to ensure data timestamps are actually in
- sync with the fps value. It is used at the init of the dataset to make sure that each
- timestamp is separated from the next by 1/fps +/- tolerance_s. This also applies to frames
- decoded from video files. It is also used to check that `delta_timestamps` (when provided) are
- multiples of 1/fps. Defaults to 1e-4.
- download_videos (bool, optional): Flag to download the videos. Note that when set to True but the
- video files are already present on local disk, they won't be downloaded again. Defaults to
- True.
- local_files_only (bool, optional): Flag to use local files only. If True, no requests to the hub
- will be made. Defaults to False.
- video_backend (str | None, optional): Video backend to use for decoding videos. There is currently
- a single option which is the pyav decoder used by Torchvision. Defaults to pyav.
- """
- super().__init__()
- self.repo_id = repo_id
- self.root = Path(root) if root else LEROBOT_HOME / repo_id
- self.image_transforms = image_transforms
- self.delta_timestamps = delta_timestamps
- self.episodes = episodes
- self.tolerance_s = tolerance_s
- self.video_backend = video_backend if video_backend else "pyav"
- self.delta_indices = None
- self.local_files_only = local_files_only
-
- # Unused attributes
- self.image_writer = None
- self.episode_buffer = None
-
- self.root.mkdir(exist_ok=True, parents=True)
-
- # Load metadata
- self.meta = LeRobotDatasetMetadata(self.repo_id, self.root, self.local_files_only)
-
- # Check version
- check_version_compatibility(self.repo_id, self.meta._version, CODEBASE_VERSION)
-
- # Load actual data
- self.download_episodes(download_videos)
- self.hf_dataset = self.load_hf_dataset()
- self.episode_data_index = get_episode_data_index(self.meta.episodes, self.episodes)
-
- # Check timestamps
- check_timestamps_sync(self.hf_dataset, self.episode_data_index, self.fps, self.tolerance_s)
-
- # Setup delta_indices
- if self.delta_timestamps is not None:
- check_delta_timestamps(self.delta_timestamps, self.fps, self.tolerance_s)
- self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps)
-
- # Available stats implies all videos have been encoded and dataset is iterable
- self.consolidated = self.meta.stats is not None
-
- def push_to_hub(
- self,
- tags: list | None = None,
- license: str | None = "apache-2.0",
- push_videos: bool = True,
- private: bool = False,
- **card_kwargs,
- ) -> None:
- if not self.consolidated:
- logging.warning(
- "You are trying to upload to the hub a LeRobotDataset that has not been consolidated yet. "
- "Consolidating first."
- )
- self.consolidate()
-
- ignore_patterns = ["images/"]
- if not push_videos:
- ignore_patterns.append("videos/")
-
- create_repo(
- repo_id=self.repo_id,
- private=private,
- repo_type="dataset",
- exist_ok=True,
- )
-
- upload_folder(
- repo_id=self.repo_id,
- folder_path=self.root,
- repo_type="dataset",
- ignore_patterns=ignore_patterns,
- )
- card = create_lerobot_dataset_card(
- tags=tags, dataset_info=self.meta.info, license=license, **card_kwargs
- )
- card.push_to_hub(repo_id=self.repo_id, repo_type="dataset")
- create_branch(repo_id=self.repo_id, branch=CODEBASE_VERSION, repo_type="dataset")
-
- def pull_from_repo(
- self,
- allow_patterns: list[str] | str | None = None,
- ignore_patterns: list[str] | str | None = None,
- ) -> None:
- snapshot_download(
- self.repo_id,
- repo_type="dataset",
- revision=self.meta._hub_version,
- local_dir=self.root,
- allow_patterns=allow_patterns,
- ignore_patterns=ignore_patterns,
- local_files_only=self.local_files_only,
- )
-
- def download_episodes(self, download_videos: bool = True) -> None:
- """Downloads the dataset from the given 'repo_id' at the provided version. If 'episodes' is given, this
- will only download those episodes (selected by their episode_index). If 'episodes' is None, the whole
- dataset will be downloaded. Thanks to the behavior of snapshot_download, if the files are already present
- in 'local_dir', they won't be downloaded again.
- """
- # TODO(rcadene, aliberts): implement faster transfer
- # https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads
- files = None
- ignore_patterns = None if download_videos else "videos/"
- if self.episodes is not None:
- files = [str(self.meta.get_data_file_path(ep_idx)) for ep_idx in self.episodes]
- if len(self.meta.video_keys) > 0 and download_videos:
- video_files = [
- str(self.meta.get_video_file_path(ep_idx, vid_key))
- for vid_key in self.meta.video_keys
- for ep_idx in self.episodes
- ]
- files += video_files
-
- self.pull_from_repo(allow_patterns=files, ignore_patterns=ignore_patterns)
-
- def load_hf_dataset(self) -> datasets.Dataset:
- """hf_dataset contains all the observations, states, actions, rewards, etc."""
- if self.episodes is None:
- path = str(self.root / "data")
- hf_dataset = load_dataset("parquet", data_dir=path, split="train")
- else:
- files = [str(self.root / self.meta.get_data_file_path(ep_idx)) for ep_idx in self.episodes]
- hf_dataset = load_dataset("parquet", data_files=files, split="train")
-
- # TODO(aliberts): hf_dataset.set_format("torch")
- hf_dataset.set_transform(hf_transform_to_torch)
-
- return hf_dataset
-
- @property
- def fps(self) -> int:
- """Frames per second used during data collection."""
- return self.meta.fps
-
- @property
- def num_frames(self) -> int:
- """Number of frames in selected episodes."""
- return len(self.hf_dataset) if self.hf_dataset is not None else self.meta.total_frames
-
- @property
- def num_episodes(self) -> int:
- """Number of episodes selected."""
- return len(self.episodes) if self.episodes is not None else self.meta.total_episodes
-
- @property
- def features(self) -> dict[str, dict]:
- return self.meta.features
-
- @property
- def hf_features(self) -> datasets.Features:
- """Features of the hf_dataset."""
- if self.hf_dataset is not None:
- return self.hf_dataset.features
- else:
- return get_hf_features_from_features(self.features)
-
- def _get_query_indices(self, idx: int, ep_idx: int) -> tuple[dict[str, list[int | bool]]]:
- ep_start = self.episode_data_index["from"][ep_idx]
- ep_end = self.episode_data_index["to"][ep_idx]
- query_indices = {
- key: [max(ep_start.item(), min(ep_end.item() - 1, idx + delta)) for delta in delta_idx]
- for key, delta_idx in self.delta_indices.items()
- }
- padding = { # Pad values outside of current episode range
- f"{key}_is_pad": torch.BoolTensor(
- [(idx + delta < ep_start.item()) | (idx + delta >= ep_end.item()) for delta in delta_idx]
- )
- for key, delta_idx in self.delta_indices.items()
- }
- return query_indices, padding
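-
- # Editor's note (toy example of the clamping/padding above, hypothetical values): for
- # delta_indices = {"action": [0, 1, 2]}, an episode spanning indices [10, 20) and idx = 18, the
- # query indices are [18, 19, 19] (clamped to the last frame of the episode) and the padding mask
- # is [False, False, True], since idx + 2 = 20 falls outside the episode.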
-
- def _get_query_timestamps(
- self,
- current_ts: float,
- query_indices: dict[str, list[int]] | None = None,
- ) -> dict[str, list[float]]:
- query_timestamps = {}
- for key in self.meta.video_keys:
- if query_indices is not None and key in query_indices:
- timestamps = self.hf_dataset.select(query_indices[key])["timestamp"]
- query_timestamps[key] = torch.stack(timestamps).tolist()
- else:
- query_timestamps[key] = [current_ts]
-
- return query_timestamps
-
- def _query_hf_dataset(self, query_indices: dict[str, list[int]]) -> dict:
- return {
- key: torch.stack(self.hf_dataset.select(q_idx)[key])
- for key, q_idx in query_indices.items()
- if key not in self.meta.video_keys
- }
-
- def _query_videos(self, query_timestamps: dict[str, list[float]], ep_idx: int) -> dict:
- """Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function
- in the main process (e.g. by using a second Dataloader with num_workers=0). It will result in a
- Segmentation Fault. This probably happens because a memory reference to the video loader is created in
- the main process and a subprocess fails to access it.
- """
- item = {}
- for vid_key, query_ts in query_timestamps.items():
- video_path = self.root / self.meta.get_video_file_path(ep_idx, vid_key)
- frames = decode_video_frames_torchvision(
- video_path, query_ts, self.tolerance_s, self.video_backend
- )
- item[vid_key] = frames.squeeze(0)
-
- return item
-
- def _add_padding_keys(self, item: dict, padding: dict[str, list[bool]]) -> dict:
- for key, val in padding.items():
- item[key] = torch.BoolTensor(val)
- return item
-
- def __len__(self):
- return self.num_frames
-
- def __getitem__(self, idx) -> dict:
- item = self.hf_dataset[idx]
- ep_idx = item["episode_index"].item()
-
- query_indices = None
- if self.delta_indices is not None:
- current_ep_idx = self.episodes.index(ep_idx) if self.episodes is not None else ep_idx
- query_indices, padding = self._get_query_indices(idx, current_ep_idx)
- query_result = self._query_hf_dataset(query_indices)
- item = {**item, **padding}
- for key, val in query_result.items():
- item[key] = val
-
- if len(self.meta.video_keys) > 0:
- current_ts = item["timestamp"].item()
- query_timestamps = self._get_query_timestamps(current_ts, query_indices)
- video_frames = self._query_videos(query_timestamps, ep_idx)
- item = {**video_frames, **item}
-
- if self.image_transforms is not None:
- image_keys = self.meta.camera_keys
- for cam in image_keys:
- item[cam] = self.image_transforms(item[cam])
-
- return item
-
- def __repr__(self):
- feature_keys = list(self.features)
- return (
- f"{self.__class__.__name__}({{\n"
- f" Repository ID: '{self.repo_id}',\n"
- f" Number of selected episodes: '{self.num_episodes}',\n"
- f" Number of selected samples: '{self.num_frames}',\n"
- f" Features: '{feature_keys}',\n"
- "})',\n"
- )
-
- def create_episode_buffer(self, episode_index: int | None = None) -> dict:
- current_ep_idx = self.meta.total_episodes if episode_index is None else episode_index
- return {
- "size": 0,
- **{key: current_ep_idx if key == "episode_index" else [] for key in self.features},
- }
-
- def _get_image_file_path(self, episode_index: int, image_key: str, frame_index: int) -> Path:
- fpath = DEFAULT_IMAGE_PATH.format(
- image_key=image_key, episode_index=episode_index, frame_index=frame_index
- )
- return self.root / fpath
-
- def _save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path) -> None:
- if self.image_writer is None:
- if isinstance(image, torch.Tensor):
- image = image.cpu().numpy()
- write_image(image, fpath)
- else:
- self.image_writer.save_image(image=image, fpath=fpath)
-
- def add_frame(self, frame: dict) -> None:
- """
- This function only adds the frame to the episode_buffer. Apart from images — which are written in a
- temporary directory — nothing is written to disk. To save those frames, the 'save_episode()' method
- then needs to be called.
- """
- # TODO(aliberts, rcadene): Add sanity check for the input, check it's numpy or torch,
- # check the dtype and shape matches, etc.
-
- if self.episode_buffer is None:
- self.episode_buffer = self.create_episode_buffer()
-
- frame_index = self.episode_buffer["size"]
- timestamp = frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps
- self.episode_buffer["frame_index"].append(frame_index)
- self.episode_buffer["timestamp"].append(timestamp)
-
- for key in frame:
- if key not in self.features:
- raise ValueError(key)
-
- if self.features[key]["dtype"] not in ["image", "video"]:
- item = frame[key].numpy() if isinstance(frame[key], torch.Tensor) else frame[key]
- self.episode_buffer[key].append(item)
- elif self.features[key]["dtype"] in ["image", "video"]:
- img_path = self._get_image_file_path(
- episode_index=self.episode_buffer["episode_index"], image_key=key, frame_index=frame_index
- )
- if frame_index == 0:
- img_path.parent.mkdir(parents=True, exist_ok=True)
- self._save_image(frame[key], img_path)
- self.episode_buffer[key].append(str(img_path))
-
- self.episode_buffer["size"] += 1
-
- def save_episode(self, task: str, encode_videos: bool = True, episode_data: dict | None = None) -> None:
- """
- This will save to disk the current episode in self.episode_buffer. Note that since it affects files on
- disk, it sets self.consolidated to False to ensure proper consolidation later on before uploading to
- the hub.
-
- Use 'encode_videos' if you want to encode videos during the saving of this episode. Otherwise,
- you can do it later with dataset.consolidate(). This is to give more flexibility on when to spend
- time for video encoding.
- """
- if not episode_data:
- episode_buffer = self.episode_buffer
-
- episode_length = episode_buffer.pop("size")
- episode_index = episode_buffer["episode_index"]
- if episode_index != self.meta.total_episodes:
- # TODO(aliberts): Add option to use existing episode_index
- raise NotImplementedError(
- "You might have manually provided the episode_buffer with an episode_index that doesn't "
- "match the total number of episodes in the dataset. This is not supported for now."
- )
-
- if episode_length == 0:
- raise ValueError(
- "You must add one or several frames with `add_frame` before calling `add_episode`."
- )
-
- task_index = self.meta.get_task_index(task)
-
- if not set(episode_buffer.keys()) == set(self.features):
- raise ValueError()
-
- for key, ft in self.features.items():
- if key == "index":
- episode_buffer[key] = np.arange(
- self.meta.total_frames, self.meta.total_frames + episode_length
- )
- elif key == "episode_index":
- episode_buffer[key] = np.full((episode_length,), episode_index)
- elif key == "task_index":
- episode_buffer[key] = np.full((episode_length,), task_index)
- elif ft["dtype"] in ["image", "video"]:
- continue
- elif len(ft["shape"]) == 1 and ft["shape"][0] == 1:
- episode_buffer[key] = np.array(episode_buffer[key], dtype=ft["dtype"])
- elif len(ft["shape"]) == 1 and ft["shape"][0] > 1:
- episode_buffer[key] = np.stack(episode_buffer[key])
- else:
- raise ValueError(key)
-
- self._wait_image_writer()
- self._save_episode_table(episode_buffer, episode_index)
-
- self.meta.save_episode(episode_index, episode_length, task, task_index)
-
- if encode_videos and len(self.meta.video_keys) > 0:
- video_paths = self.encode_episode_videos(episode_index)
- for key in self.meta.video_keys:
- episode_buffer[key] = video_paths[key]
-
- if not episode_data: # Reset the buffer
- self.episode_buffer = self.create_episode_buffer()
-
- self.consolidated = False
-
- def _save_episode_table(self, episode_buffer: dict, episode_index: int) -> None:
- episode_dict = {key: episode_buffer[key] for key in self.hf_features}
- ep_dataset = datasets.Dataset.from_dict(episode_dict, features=self.hf_features, split="train")
- ep_data_path = self.root / self.meta.get_data_file_path(ep_index=episode_index)
- ep_data_path.parent.mkdir(parents=True, exist_ok=True)
- write_parquet(ep_dataset, ep_data_path)
-
- def clear_episode_buffer(self) -> None:
- episode_index = self.episode_buffer["episode_index"]
- if self.image_writer is not None:
- for cam_key in self.meta.camera_keys:
- img_dir = self._get_image_file_path(
- episode_index=episode_index, image_key=cam_key, frame_index=0
- ).parent
- if img_dir.is_dir():
- shutil.rmtree(img_dir)
-
- # Reset the buffer
- self.episode_buffer = self.create_episode_buffer()
-
- def start_image_writer(self, num_processes: int = 0, num_threads: int = 4) -> None:
- if isinstance(self.image_writer, AsyncImageWriter):
- logging.warning(
- "You are starting a new AsyncImageWriter that is replacing an already existing one in the dataset."
- )
-
- self.image_writer = AsyncImageWriter(
- num_processes=num_processes,
- num_threads=num_threads,
- )
-
- def stop_image_writer(self) -> None:
- """
- Whenever wrapping this dataset inside a parallelized DataLoader, this needs to be called first to
- remove the image_writer in order for the LeRobotDataset object to be pickleable and parallelizable.
- """
- if self.image_writer is not None:
- self.image_writer.stop()
- self.image_writer = None
-
- def _wait_image_writer(self) -> None:
- """Wait for asynchronous image writer to finish."""
- if self.image_writer is not None:
- self.image_writer.wait_until_done()
-
- def encode_videos(self) -> None:
- """
- Use ffmpeg to convert frames stored as png into mp4 videos.
- Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding,
- since video encoding with ffmpeg is already using multithreading.
- """
- for ep_idx in range(self.meta.total_episodes):
- self.encode_episode_videos(ep_idx)
-
- def encode_episode_videos(self, episode_index: int) -> dict:
- """
- Use ffmpeg to convert frames stored as png into mp4 videos.
- Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding,
- since video encoding with ffmpeg is already using multithreading.
- """
- video_paths = {}
- for key in self.meta.video_keys:
- video_path = self.root / self.meta.get_video_file_path(episode_index, key)
- video_paths[key] = str(video_path)
- if video_path.is_file():
- # Skip if video is already encoded. Could be the case when resuming data recording.
- continue
- img_dir = self._get_image_file_path(
- episode_index=episode_index, image_key=key, frame_index=0
- ).parent
- encode_video_frames(img_dir, video_path, self.fps, overwrite=True)
-
- return video_paths
-
- def consolidate(self, run_compute_stats: bool = True, keep_image_files: bool = False) -> None:
- self.hf_dataset = self.load_hf_dataset()
- self.episode_data_index = get_episode_data_index(self.meta.episodes, self.episodes)
- check_timestamps_sync(self.hf_dataset, self.episode_data_index, self.fps, self.tolerance_s)
-
- if len(self.meta.video_keys) > 0:
- self.encode_videos()
- self.meta.write_video_info()
-
- if not keep_image_files:
- img_dir = self.root / "images"
- if img_dir.is_dir():
- shutil.rmtree(self.root / "images")
-
- video_files = list(self.root.rglob("*.mp4"))
- assert len(video_files) == self.num_episodes * len(self.meta.video_keys)
-
- parquet_files = list(self.root.rglob("*.parquet"))
- assert len(parquet_files) == self.num_episodes
-
- if run_compute_stats:
- self.stop_image_writer()
- # TODO(aliberts): refactor stats in save_episodes
- self.meta.stats = compute_stats(self)
- serialized_stats = serialize_dict(self.meta.stats)
- write_json(serialized_stats, self.root / STATS_PATH)
- self.consolidated = True
- else:
- logging.warning(
- "Skipping computation of the dataset statistics, dataset is not fully consolidated."
- )
-
- @classmethod
- def create(
- cls,
- repo_id: str,
- fps: int,
- root: str | Path | None = None,
- robot: Robot | None = None,
- robot_type: str | None = None,
- features: dict | None = None,
- use_videos: bool = True,
- tolerance_s: float = 1e-4,
- image_writer_processes: int = 0,
- image_writer_threads: int = 0,
- video_backend: str | None = None,
- ) -> "LeRobotDataset":
- """Create a LeRobot Dataset from scratch in order to record data."""
- obj = cls.__new__(cls)
- obj.meta = LeRobotDatasetMetadata.create(
- repo_id=repo_id,
- fps=fps,
- root=root,
- robot=robot,
- robot_type=robot_type,
- features=features,
- use_videos=use_videos,
- )
- obj.repo_id = obj.meta.repo_id
- obj.root = obj.meta.root
- obj.local_files_only = obj.meta.local_files_only
- obj.tolerance_s = tolerance_s
- obj.image_writer = None
-
- if image_writer_processes or image_writer_threads:
- obj.start_image_writer(image_writer_processes, image_writer_threads)
-
- # TODO(aliberts, rcadene, alexander-soare): Merge this with OnlineBuffer/DataBuffer
- obj.episode_buffer = obj.create_episode_buffer()
-
- # This bool indicates that the current LeRobotDataset instance is in sync with the files on disk. It
- # is used to know when certain operations are needed (for instance, computing dataset statistics). In
- # order to be able to push the dataset to the hub, it needs to be consolidated first by calling
- # self.consolidate().
- obj.consolidated = True
-
- obj.episodes = None
- obj.hf_dataset = None
- obj.image_transforms = None
- obj.delta_timestamps = None
- obj.delta_indices = None
- obj.episode_data_index = None
- obj.video_backend = video_backend if video_backend is not None else "pyav"
- return obj
-
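-
-# Editor's sketch (illustrative recording workflow pieced together from the docstrings above; the
-# repo id, robot object, feature keys and task string are hypothetical):
-#
-# dataset = LeRobotDataset.create("user/my_dataset", fps=30, robot=robot)
-# for _ in range(num_frames):
-#     dataset.add_frame({"observation.state": state, "action": action})  # plus any image keys
-# dataset.save_episode(task="pick up the cube")
-# dataset.consolidate()
-# dataset.push_to_hub()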
-
-class MultiLeRobotDataset(torch.utils.data.Dataset):
- """A dataset consisting of multiple underlying `LeRobotDataset`s.
-
- The underlying `LeRobotDataset`s are effectively concatenated, and this class adopts much of the API
- structure of `LeRobotDataset`.
- """
-
- def __init__(
- self,
- repo_ids: list[str],
- root: str | Path | None = None,
- episodes: dict | None = None,
- image_transforms: Callable | None = None,
- delta_timestamps: dict[list[float]] | None = None,
- tolerances_s: dict | None = None,
- download_videos: bool = True,
- local_files_only: bool = False,
- video_backend: str | None = None,
- ):
- super().__init__()
- self.repo_ids = repo_ids
- self.root = Path(root) if root else LEROBOT_HOME
- self.tolerances_s = tolerances_s if tolerances_s else {repo_id: 1e-4 for repo_id in repo_ids}
- # Construct the underlying datasets passing everything but `transform` and `delta_timestamps` which
- # are handled by this class.
- self._datasets = [
- LeRobotDataset(
- repo_id,
- root=self.root / repo_id,
- episodes=episodes[repo_id] if episodes else None,
- image_transforms=image_transforms,
- delta_timestamps=delta_timestamps,
- tolerance_s=self.tolerances_s[repo_id],
- download_videos=download_videos,
- local_files_only=local_files_only,
- video_backend=video_backend,
- )
- for repo_id in repo_ids
- ]
-
- # Disable any data keys that are not common across all of the datasets. Note: we may relax this
- # restriction in future iterations of this class. For now, this is necessary at least for being able
- # to use PyTorch's default DataLoader collate function.
- self.disabled_features = set()
- intersection_features = set(self._datasets[0].features)
- for ds in self._datasets:
- intersection_features.intersection_update(ds.features)
- if len(intersection_features) == 0:
- raise RuntimeError(
- "Multiple datasets were provided but they had no keys common to all of them. "
- "The multi-dataset functionality currently only keeps common keys."
- )
- for repo_id, ds in zip(self.repo_ids, self._datasets, strict=True):
- extra_keys = set(ds.features).difference(intersection_features)
- logging.warning(
- f"keys {extra_keys} of {repo_id} were disabled as they are not contained in all the "
- "other datasets."
- )
- self.disabled_features.update(extra_keys)
-
- self.image_transforms = image_transforms
- self.delta_timestamps = delta_timestamps
- self.stats = aggregate_stats(self._datasets)
-
- @property
- def repo_id_to_index(self):
- """Return a mapping from dataset repo_id to a dataset index automatically created by this class.
-
- This index is incorporated as a data key in the dictionary returned by `__getitem__`.
- """
- return {repo_id: i for i, repo_id in enumerate(self.repo_ids)}
-
- @property
- def repo_index_to_id(self):
- """Return the inverse mapping if repo_id_to_index."""
- return {v: k for k, v in self.repo_id_to_index}
-
- @property
- def fps(self) -> int:
- """Frames per second used during data collection.
-
- NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
- """
- return self._datasets[0].meta.info["fps"]
-
- @property
- def video(self) -> bool:
- """Returns True if this dataset loads video frames from mp4 files.
-
- Returns False if it only loads images from png files.
-
- NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
- """
- return self._datasets[0].meta.info.get("video", False)
-
- @property
- def features(self) -> datasets.Features:
- features = {}
- for dataset in self._datasets:
- features.update({k: v for k, v in dataset.hf_features.items() if k not in self.disabled_features})
- return features
-
- @property
- def camera_keys(self) -> list[str]:
- """Keys to access image and video stream from cameras."""
- keys = []
- for key, feats in self.features.items():
- if isinstance(feats, (datasets.Image, VideoFrame)):
- keys.append(key)
- return keys
-
- @property
- def video_frame_keys(self) -> list[str]:
- """Keys to access video frames that requires to be decoded into images.
-
- Note: It is empty if the dataset contains images only,
- or equal to `self.cameras` if the dataset contains videos only,
- or can even be a subset of `self.cameras` in the case of a mixed image/video dataset.
- """
- video_frame_keys = []
- for key, feats in self.features.items():
- if isinstance(feats, VideoFrame):
- video_frame_keys.append(key)
- return video_frame_keys
-
- @property
- def num_frames(self) -> int:
- """Number of samples/frames."""
- return sum(d.num_frames for d in self._datasets)
-
- @property
- def num_episodes(self) -> int:
- """Number of episodes."""
- return sum(d.num_episodes for d in self._datasets)
-
- @property
- def tolerance_s(self) -> float:
- """Tolerance in seconds used to discard loaded frames when their timestamps
- are not close enough from the requested frames. It is only used when `delta_timestamps`
- is provided or when loading video frames from mp4 files.
- """
- # 1e-4 to account for possible numerical error
- return 1 / self.fps - 1e-4
-
- def __len__(self):
- return self.num_frames
-
- def __getitem__(self, idx: int) -> dict[str, torch.Tensor]:
- if idx >= len(self):
- raise IndexError(f"Index {idx} out of bounds.")
- # Determine which dataset to get an item from based on the index.
- start_idx = 0
- dataset_idx = 0
- for dataset in self._datasets:
- if idx >= start_idx + dataset.num_frames:
- start_idx += dataset.num_frames
- dataset_idx += 1
- continue
- break
- else:
- raise AssertionError("We expect the loop to break out as long as the index is within bounds.")
- item = self._datasets[dataset_idx][idx - start_idx]
- item["dataset_index"] = torch.tensor(dataset_idx)
- for data_key in self.disabled_features:
- if data_key in item:
- del item[data_key]
-
- return item
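-
- # Editor's note (toy example of the index routing above, hypothetical sizes): with two underlying
- # datasets of 100 and 50 frames, idx = 120 skips past the first dataset (start_idx becomes 100,
- # dataset_idx becomes 1) and resolves to item 20 of the second dataset, returned with
- # item["dataset_index"] == 1.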
-
- def __repr__(self):
- return (
- f"{self.__class__.__name__}(\n"
- f" Repository IDs: '{self.repo_ids}',\n"
- f" Number of Samples: {self.num_frames},\n"
- f" Number of Episodes: {self.num_episodes},\n"
- f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n"
- f" Recorded Frames per Second: {self.fps},\n"
- f" Camera Keys: {self.camera_keys},\n"
- f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n"
- f" Transformations: {self.image_transforms},\n"
- f")"
- )
diff --git a/lerobot/common/datasets/push_dataset_to_hub/CODEBASE_VERSION.md b/lerobot/common/datasets/push_dataset_to_hub/CODEBASE_VERSION.md
deleted file mode 100644
index 8fcc8bbeac..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/CODEBASE_VERSION.md
+++ /dev/null
@@ -1,56 +0,0 @@
-## Using / Updating `CODEBASE_VERSION` (for maintainers)
-
-Since the datasets we push to the hub are decoupled from the evolution of this repo, we use a
-`CODEBASE_VERSION` variable (defined in lerobot/common/datasets/lerobot_dataset.py) to ensure
-compatibility between the datasets and our code.
-
-For instance, [`lerobot/pusht`](https://huggingface.co/datasets/lerobot/pusht) has many versions to maintain backward compatibility between LeRobot codebase versions:
-- [v1.0](https://huggingface.co/datasets/lerobot/pusht/tree/v1.0)
-- [v1.1](https://huggingface.co/datasets/lerobot/pusht/tree/v1.1)
-- [v1.2](https://huggingface.co/datasets/lerobot/pusht/tree/v1.2)
-- [v1.3](https://huggingface.co/datasets/lerobot/pusht/tree/v1.3)
-- [v1.4](https://huggingface.co/datasets/lerobot/pusht/tree/v1.4)
-- [v1.5](https://huggingface.co/datasets/lerobot/pusht/tree/v1.5)
-- [v1.6](https://huggingface.co/datasets/lerobot/pusht/tree/v1.6) <-- last version
-- [main](https://huggingface.co/datasets/lerobot/pusht/tree/main) <-- points to the last version
-
-Starting with v1.6, every dataset pushed to the hub or saved locally also has this version number in its
-`info.json` metadata.
-
-### Uploading a new dataset
-If you are pushing a new dataset, you don't need to worry about any of the instructions below, nor about
-being compatible with previous codebase versions. The `push_dataset_to_hub.py` script will automatically tag your
-dataset with the current `CODEBASE_VERSION`.
-
-### Updating an existing dataset
-If you want to update an existing dataset, you need to change the `CODEBASE_VERSION` from `lerobot_dataset.py`
-before running `push_dataset_to_hub.py`. This is especially useful if you introduce a breaking change,
-intentionally or not (i.e. something not backward compatible, such as modifying the reward functions used,
-deleting some frames at the end of an episode, etc.). That way, people running a previous version of the
-codebase won't be affected by your change and backward compatibility is maintained.
-
-However, you will need to update the version of ALL the other datasets so that they have the new
-`CODEBASE_VERSION` as a branch in their Hugging Face dataset repository. Don't worry, there is an easy way
-that doesn't require running `push_dataset_to_hub.py`. You can just "branch out" from the `main` branch of each
-HF dataset repo by running this script, which corresponds to a `git checkout -b` (so no copy or upload needed):
-
-```python
-from huggingface_hub import HfApi
-
-from lerobot import available_datasets
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-
-api = HfApi()
-
-for repo_id in available_datasets:
- dataset_info = api.list_repo_refs(repo_id, repo_type="dataset")
- branches = [b.name for b in dataset_info.branches]
- if CODEBASE_VERSION in branches:
- print(f"{repo_id} already @{CODEBASE_VERSION}, skipping.")
- continue
- else:
- # Now create a branch named after the new version by branching out from "main"
- # which is expected to be the preceding version
- api.create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION, revision="main")
- print(f"{repo_id} successfully updated @{CODEBASE_VERSION}")
-```
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_cabinet.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_cabinet.txt
deleted file mode 100644
index 8e821d292d..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_cabinet.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-https://drive.google.com/file/d/1_SOJkgfP5yZyVjMhTt3nwhvyUjcnlI51/view?usp=drive_link
-https://drive.google.com/file/d/1rmgN8UUzph1qwJnzG1d-uOafodn-gLvb/view?usp=drive_link
-https://drive.google.com/file/d/1NYQ-XxsBVinB6dUoZmVWweT83367P3i2/view?usp=drive_link
-https://drive.google.com/file/d/1oAv_j74zxxCJieMG7r5Vl2BeHK1__3s3/view?usp=drive_link
-https://drive.google.com/file/d/1wFUJQROsrTJt64YRuIeExhFjr2wnK5uu/view?usp=drive_link
-https://drive.google.com/file/d/1KzL3Tt0Le7jVl58XVRUcmigmXjyiuhbK/view?usp=drive_link
-https://drive.google.com/file/d/1qy_YBladeHtianSSGtgAPSHtMin7msvf/view?usp=drive_link
-https://drive.google.com/file/d/1rA_F0V_qL_nyuC_0aBKCisF4-0TIkF2Y/view?usp=drive_link
-https://drive.google.com/file/d/1hw-8qMpz9VgSt62XoASqNRuPECpCwJQP/view?usp=drive_link
-https://drive.google.com/file/d/1BpHOl9rKMzdvNGka6js7C0s40hH6vnDA/view?usp=drive_link
-https://drive.google.com/file/d/1PazhkhiDnJ-OUMyDVDFxEZNKQQqHiNWS/view?usp=drive_link
-https://drive.google.com/file/d/1lZ665R6ATl57dypxH4dGJ2NSt6XYnbuz/view?usp=drive_link
-https://drive.google.com/file/d/1V9HzLaf-tlG15wUzT7KrTDCS_z1vi5NV/view?usp=drive_link
-https://drive.google.com/file/d/1aKauWiXoKqbNwn_2xs4MrmLlaNYlVNmO/view?usp=drive_link
-https://drive.google.com/file/d/1WVD5DFhriO1YmmOgiVHhacR6HWoTPxav/view?usp=drive_link
-https://drive.google.com/file/d/1_X43WgeBAsfkhH9EmpyPki8U9joMeAGC/view?usp=drive_link
-https://drive.google.com/file/d/1t8x0GqWoNKWtnBsB7_D40Z34nL9ak4kf/view?usp=drive_link
-https://drive.google.com/file/d/15V_f26WaKOXjKnq2T3HRWAmtQUi4lbu2/view?usp=drive_link
-https://drive.google.com/file/d/11VFIAsiSDsMOBANgrOcZBpKB9AFWnLy7/view?usp=drive_link
-https://drive.google.com/file/d/1M0NS7vVaxJv3FHnuRYtdwTFYF7We4LxP/view?usp=drive_link
-https://drive.google.com/file/d/1mR0OItTNqFnVLoczcyKYlm6drAy778lO/view?usp=drive_link
-https://drive.google.com/file/d/1NbVFWDQAh-z4JJ4D-Zw6Lps9kdvpqh2j/view?usp=drive_link
-https://drive.google.com/file/d/1JQoZGBzl4W3QG26-n39tefcGN0fDRMbB/view?usp=drive_link
-https://drive.google.com/file/d/1VBjHl-TvZpncopvasIP5G9gecbB2a5f6/view?usp=drive_link
-https://drive.google.com/file/d/1VzSf6zaB21nahm7MsPwroXbJ84NIwq0b/view?usp=drive_link
-https://drive.google.com/file/d/1OtNnfMEydNtZOcivs4k6E_uJSpf8PkGy/view?usp=drive_link
-https://drive.google.com/file/d/14nVvpvsrFr_03Pa_N7MKzwnRwibOUYM6/view?usp=drive_link
-https://drive.google.com/file/d/1M8li6duiO2r3lv_9HhF_XJn0oZUIEK5F/view?usp=drive_link
-https://drive.google.com/file/d/1Cpzea6fO14lxAaNfSBifqoa4ekhCiLD1/view?usp=drive_link
-https://drive.google.com/file/d/1mbxRTm5vlbsY9UJ0jfjM6j9D7kPJjBpG/view?usp=drive_link
-https://drive.google.com/file/d/1RXD1i6IfWsHRlCxVmG04h2h5Ycm_WwZN/view?usp=drive_link
-https://drive.google.com/file/d/1QFqFSwDGOk1BkgGmqgCcc2BRWnJ6R3MA/view?usp=drive_link
-https://drive.google.com/file/d/1bFqWR8DQM0ZUxxtS2bl-RANQvukeFLzp/view?usp=drive_link
-https://drive.google.com/file/d/1pR-rH3yNGoyPdD4hJ6-3lXQ-PstBx9du/view?usp=drive_link
-https://drive.google.com/file/d/107OAwLY-hva9HeQLIK7VCh-ytdDabVjr/view?usp=drive_link
-https://drive.google.com/file/d/1Tpl08QOaSZ37GTO4awFWSdD8wBR9xdlT/view?usp=drive_link
-https://drive.google.com/file/d/1MR164AOM-0S1T6RX8xKTV2IHyaCvpqAW/view?usp=drive_link
-https://drive.google.com/file/d/1_wknJfVnStIhJ82lU_QtcrwahsqYIsr8/view?usp=drive_link
-https://drive.google.com/file/d/1ZuEktWrbYkTx0l5pj3WiZ2CJrfbDOHNo/view?usp=drive_link
-https://drive.google.com/file/d/15G_10hkkkq6yxvyI5NGZirlF-RzduR2F/view?usp=drive_link
-https://drive.google.com/file/d/1DBKxg3ONqh7dhLuX6oh1Yyo2x383V1Hp/view?usp=drive_link
-https://drive.google.com/file/d/1B5iDBkTUr5vopDddV_fHud18SqAHhauS/view?usp=drive_link
-https://drive.google.com/file/d/1acwFV0eenRkki1QcjSKH5xqOtys-P3Pr/view?usp=drive_link
-https://drive.google.com/file/d/1S47BI83xyrh-FKXsvAQqer98Biu_p8XK/view?usp=drive_link
-https://drive.google.com/file/d/1JL6DmBZl3uyq9dyLfgSqtGF06e7E9JwM/view?usp=drive_link
-https://drive.google.com/file/d/16WvRS4Kjog8Pxgr0E3sGGnI01YwL9Uql/view?usp=drive_link
-https://drive.google.com/file/d/12ttGqL33IPWg0-s1SD44rr22M6LiSQBr/view?usp=drive_link
-https://drive.google.com/file/d/1OyZqqnldTU_DliRbr6x0C4a_iWPwIN7j/view?usp=drive_link
-https://drive.google.com/file/d/1oYk00IpLnR9fesLfD15Ebe7nVBffEbcS/view?usp=drive_link
-https://drive.google.com/file/d/1eyE2-MQduCEqCd-5_kl5zsoOEERAzpZD/view?usp=drive_link
-https://drive.google.com/file/d/1ir1Ya-vO0d97pfvbePlUeuKTTRc0qIMU/view?usp=drive_link
-https://drive.google.com/file/d/1hOi-JnqlMt47gVnLZHMTqeojyYVErohl/view?usp=drive_link
-https://drive.google.com/file/d/1NFFw5_PqigQ7xGqsL-MNq2B1r5yAscCf/view?usp=drive_link
-https://drive.google.com/file/d/1uftq1-Zlh8d2sNLWrlVcKYQUwZTD7o24/view?usp=drive_link
-https://drive.google.com/file/d/1-ax19dSLPacVgk000T-m3l4flPcg07pM/view?usp=drive_link
-https://drive.google.com/file/d/126y-lgn86-ZmCz8hooF1THKJGGObw3OB/view?usp=drive_link
-https://drive.google.com/file/d/1JiDniK0VmDIkk92AbBILb8J2Ba59PWML/view?usp=drive_link
-https://drive.google.com/file/d/1kr8nPIRljiU0R4J9SMgj80o1FPQxzu9z/view?usp=drive_link
-https://drive.google.com/file/d/1bbThWRij1pKBh_kFgV8FwK0sXtTHBoLX/view?usp=drive_link
-https://drive.google.com/file/d/1WenzDW6lxk1xkOFm-OiGFfc0ROskAuKU/view?usp=drive_link
-https://drive.google.com/file/d/1MiKRzuzUn1yN-k_6kPJJzIGy7dT-nnsD/view?usp=drive_link
-https://drive.google.com/file/d/17rRg2tcmB-gNhQ0KoZJQmNfyFeoij1jH/view?usp=drive_link
-https://drive.google.com/file/d/11mokBpvrY3ld6sY5WztREtJ1jgqfQV70/view?usp=drive_link
-https://drive.google.com/file/d/1Il_6IOx9NDp1bX_KHizJfBwzTufTmn86/view?usp=drive_link
-https://drive.google.com/file/d/1KswtJGsxJ7eeBDAmNA_aeLjOxcH6MIxa/view?usp=drive_link
-https://drive.google.com/file/d/1gzMhi5uWu4C3Y6WbQ3L-08V96GxTZrRR/view?usp=drive_link
-https://drive.google.com/file/d/1nRQFtaBxfUCYc2W90Qibh0kHCt6YQCfc/view?usp=drive_link
-https://drive.google.com/file/d/1vs-gyW-KheqHbUATwAhA2mmR9GOGw7f_/view?usp=drive_link
-https://drive.google.com/file/d/1MuxzGOA2fgLaHryq82KkQumtuRJGcUOC/view?usp=drive_link
-https://drive.google.com/file/d/1IIwxZnGlqrXLUXqG6yMO0r7uhCvhpk9e/view?usp=drive_link
-https://drive.google.com/file/d/1vE7XPyaFcXP4DtTY5Y9WKIt7zWgmX-Cr/view?usp=drive_link
-https://drive.google.com/file/d/1j-bIV09gr21RC3-x1N_pK4RPLV3fmWKz/view?usp=drive_link
-https://drive.google.com/file/d/1t3nW1rD3S-EL0Oymb5U7ZAj5UMkydkln/view?usp=drive_link
-https://drive.google.com/file/d/14hbfHCdMKtJZ41F9CQReMec2jeRFTOqR/view?usp=drive_link
-https://drive.google.com/file/d/1x-hUyOSne5BW0AzQ3W6_Pf4g5yXQWi9M/view?usp=drive_link
-https://drive.google.com/file/d/1sw9JqRg6E-3P84I3ZhzTrJMu0vuiaMmP/view?usp=drive_link
-https://drive.google.com/file/d/1LuqhQlL4MGZhB_6THmkovRxrlP26BbdC/view?usp=drive_link
-https://drive.google.com/file/d/15C5K6v_lkjnMSmUvVyqHQKwh2N166e7K/view?usp=drive_link
-https://drive.google.com/file/d/1ns_9eSsQeeoZ10nlbkLy8tu0GmJFSnkt/view?usp=drive_link
-https://drive.google.com/file/d/1NpzWJeK6CqjxzjIMYe6aYdX8xGsQwD4o/view?usp=drive_link
-https://drive.google.com/file/d/1NMLezwufKJ9_8xTc9KQThSzVVD71B9Ui/view?usp=drive_link
-https://drive.google.com/file/d/1aa71DCUqs6oXlIxX35jgsmsgm-NlDxPV/view?usp=drive_link
-https://drive.google.com/file/d/1UJzkIZzAL0j-D5YQBnoq7mHvttASy12O/view?usp=drive_link
-https://drive.google.com/file/d/1nPgx36HIJFb7oI94VbRzWjpPP2GANxzG/view?usp=drive_link
-https://drive.google.com/file/d/1NovAP-KVJjqcuvWy3d6G4ptGGAIDqcCx/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_chair.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_chair.txt
deleted file mode 100644
index 497f8d0459..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_chair.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-https://drive.google.com/file/d/11M3Ye0r5agMaaicPbVGD0q2Hb3rGklbb/view?usp=drive_link
-https://drive.google.com/file/d/1-tx7SvYYgSvXCvnf_EI2OVdwK-CkFY6S/view?usp=drive_link
-https://drive.google.com/file/d/1EWJunmOpMHaU1hE106wwpbkGYcjQXYAF/view?usp=drive_link
-https://drive.google.com/file/d/1IDn95Z7FSiCckrSENtGV4u3RyFHNQSDY/view?usp=drive_link
-https://drive.google.com/file/d/1CwzvWj1i7QOtqrZvsCZ6BdZaKNDfpN32/view?usp=drive_link
-https://drive.google.com/file/d/1HvAvlhm77nAD3Td24QPSeq8lw-Rl_aOh/view?usp=drive_link
-https://drive.google.com/file/d/1t-suKYOPhXH666RpAYNRp2QU_DOy3AeM/view?usp=drive_link
-https://drive.google.com/file/d/18xpKgWh7RWyjMN5PkLTOo-AxsAadAuRw/view?usp=drive_link
-https://drive.google.com/file/d/1oci5Eto-ztv-AQNz8EnwZveBIhxvk-xJ/view?usp=drive_link
-https://drive.google.com/file/d/1Y-t_4vxdE6NpHO0DLJR8f3mD0Q-Wj5-c/view?usp=drive_link
-https://drive.google.com/file/d/1lylRqbbbB8bgtpsBWMPACmHJreuKmllv/view?usp=drive_link
-https://drive.google.com/file/d/1yliSyMig_NXShWfQx6qyW7Ijf2Y5lFK6/view?usp=drive_link
-https://drive.google.com/file/d/1XXhwJsJbeb7KXAooGvJapnm9bjnGUmxS/view?usp=drive_link
-https://drive.google.com/file/d/1_xs1f3hW2JArKyvfF7UWubWjyROGTLs6/view?usp=drive_link
-https://drive.google.com/file/d/1WVEHpr6EqKCZbkHapQSTXJq4xE4SWFT-/view?usp=drive_link
-https://drive.google.com/file/d/1RqOHv9pEQGvW8NUA7ynffFmG999TL_Az/view?usp=drive_link
-https://drive.google.com/file/d/1cu5AgD2gh-uA3PFJmzxxzNaF3qOSlYY1/view?usp=drive_link
-https://drive.google.com/file/d/1SsrXqiPclNrnYToPZ9Uq-k3y0C4qdHT1/view?usp=drive_link
-https://drive.google.com/file/d/1-J7EXf0vjkLIfSqT8ICEsP6CTjzSLBop/view?usp=drive_link
-https://drive.google.com/file/d/11O7ewUmoZXfyyKjy_6B5RW4DpjICxqBT/view?usp=drive_link
-https://drive.google.com/file/d/1iic44kZoCsjNsfAz2cMstZ9-WQvAhblF/view?usp=drive_link
-https://drive.google.com/file/d/1yLV1lVX-2WnWQldGlnQZ0x7QBuDiVkL3/view?usp=drive_link
-https://drive.google.com/file/d/1Tybp9ru98TTbGn4eyROpUQwDFuALWXmk/view?usp=drive_link
-https://drive.google.com/file/d/13E9OTMiipVJByDs5-J19oWwAz7l94LTN/view?usp=drive_link
-https://drive.google.com/file/d/1EeTpJQdMSliw4JzSMtJ6CyTvVdexjM4M/view?usp=drive_link
-https://drive.google.com/file/d/1NHyNwoFqzeAu-1_PSpq5JfxaiD_xbpn9/view?usp=drive_link
-https://drive.google.com/file/d/1fJcS0phDp4xm_FyGaJ5wr9Pe4KqtHaxD/view?usp=drive_link
-https://drive.google.com/file/d/12AqrLUaewDPEcFRqPZeZFb_TQ0Lfi3At/view?usp=drive_link
-https://drive.google.com/file/d/1x_hd4Qsq1oJS-aj2t3qM7WbbV7KZj05b/view?usp=drive_link
-https://drive.google.com/file/d/14OUSUArmsB068hs6BuEIXQhI1Cyz8Sf0/view?usp=drive_link
-https://drive.google.com/file/d/16zlzh1T5zeUJQnFf382NXkFEKEnDub4O/view?usp=drive_link
-https://drive.google.com/file/d/1IbDltmN-NEFCNtr1TO4ILxEgQ94rtjWv/view?usp=drive_link
-https://drive.google.com/file/d/15gmlf8Gx9455pZ1AlqcCSwh3nDPxMzSr/view?usp=drive_link
-https://drive.google.com/file/d/1qHpRL1oZfIMo_vxnm8qfwQ-7l0BZIVva/view?usp=drive_link
-https://drive.google.com/file/d/1H1xskIgiFZivkYn23rMzH3xePGOh3VTC/view?usp=drive_link
-https://drive.google.com/file/d/1avls6Pv0kYiCMNVknbc1zQsgy64MUDMM/view?usp=drive_link
-https://drive.google.com/file/d/1MmWVgCj5khc8KMIifmt3EzF1o-CtPyyn/view?usp=drive_link
-https://drive.google.com/file/d/1U0kCc_xqW0WNppf4sbnK14euWKdPZtzB/view?usp=drive_link
-https://drive.google.com/file/d/16CaEyQscOuhLj23PEGDTL9DeyNkohkMn/view?usp=drive_link
-https://drive.google.com/file/d/1Iu8uM6UUJ0zW8tvN-9UiOe_4oSNzEutg/view?usp=drive_link
-https://drive.google.com/file/d/1UImqiBaIxCR-1DNJaZhHqeHhaySOtVIr/view?usp=drive_link
-https://drive.google.com/file/d/1VpU2V_leIoRIyv_lAvE7eLHBG8DxCTnp/view?usp=drive_link
-https://drive.google.com/file/d/1_Q8J27OT3Xby7QY6yHvIJauFRWEMxkRm/view?usp=drive_link
-https://drive.google.com/file/d/1bantmVo1L9Xz4tbiNw_a1UC2Z_HPO1wT/view?usp=drive_link
-https://drive.google.com/file/d/1IRIXMJMCBDkBjbaHvAlEiBogSvZ1jK_3/view?usp=drive_link
-https://drive.google.com/file/d/1mAHXKjiFbjwydypW2t5Lv8_H5x6nHegl/view?usp=drive_link
-https://drive.google.com/file/d/1SfyY796fLrBCMY39OcyuxZafqSCRZPZk/view?usp=drive_link
-https://drive.google.com/file/d/1X-44sZ8CcfzIskc0dvSx882o1yFhHaZB/view?usp=drive_link
-https://drive.google.com/file/d/1BOIWCCCk6DLD4Bmvc75ZbbLi9AQm-1ao/view?usp=drive_link
-https://drive.google.com/file/d/1RuyDtRE1kk76sw-wP8vx5SgLoPF3PA_H/view?usp=drive_link
-https://drive.google.com/file/d/1c4eoQiBbGuy3CTAQDUSkd84Ponh1roAQ/view?usp=drive_link
-https://drive.google.com/file/d/19PXB9z4Ljq6dsbf9TqcOrrP5SRbw2Tc_/view?usp=drive_link
-https://drive.google.com/file/d/1nn1VVZVoIXWdYDozR7XHXE4mPLQG80PQ/view?usp=drive_link
-https://drive.google.com/file/d/1MBdFGOKPV8GUhwoSsJ_Ky3qAMLM2Bv3K/view?usp=drive_link
-https://drive.google.com/file/d/1of3k_M-7Nh3I1TndcWedxK4ca9dn8Sc5/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_elevator.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_elevator.txt
deleted file mode 100644
index abb42b5598..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_elevator.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-https://drive.google.com/file/d/12ctkOAdkCNGN1JLbZb5ww3XTBn2LFpGI/view?usp=drive_link
-https://drive.google.com/file/d/1G_Vd46_4fq6O64gHHjUbJX5Ld44ZZx0y/view?usp=drive_link
-https://drive.google.com/file/d/1uKgUy73B3xBogQAOUhfZjO0X5qZGsi2c/view?usp=drive_link
-https://drive.google.com/file/d/1fu9cIrfI-fE2LhdGUxbx7-8Ci_PF8Ypm/view?usp=drive_link
-https://drive.google.com/file/d/1Ygk9ZPJzx8xw2A9JF3NHbJ44TqnvSTQR/view?usp=drive_link
-https://drive.google.com/file/d/18m5xPuccNsEB20WPshm3zhxmXc6k63ED/view?usp=drive_link
-https://drive.google.com/file/d/1DiqqxC44rriviRQpqogcv0-EB-Y6nr9g/view?usp=drive_link
-https://drive.google.com/file/d/1qPdaoTVDizJXkfXLioWU7iJ8hqCXSyOQ/view?usp=drive_link
-https://drive.google.com/file/d/1Fj9kIA_mG7f67WFfACJEaZ7izcHG7vUm/view?usp=drive_link
-https://drive.google.com/file/d/1WpYehZnI2P7dUdJPfkE-ij1rqCnjZEbB/view?usp=drive_link
-https://drive.google.com/file/d/1_zwWkT4jPyzB38STWb6whlzsPzXmfA9r/view?usp=drive_link
-https://drive.google.com/file/d/1U6-J4I_fPlSFFGfhZPxS5_YzKXwXIZYp/view?usp=drive_link
-https://drive.google.com/file/d/1pRhxxcTfZp5tQo_EScvJUwfc3amiS6Vk/view?usp=drive_link
-https://drive.google.com/file/d/1lWLntqra83RlYU_gN7Vostnfydf6gutd/view?usp=drive_link
-https://drive.google.com/file/d/1vIBKo0x-NYEHV1FvRpco1lQMpRdAWAIL/view?usp=drive_link
-https://drive.google.com/file/d/1pdrLV3JTQou_XH0Aap61Ssf60iVKm1jJ/view?usp=drive_link
-https://drive.google.com/file/d/1QTsLoQ7SwmKdQHjBGVDaR2uTwfFwtrOf/view?usp=drive_link
-https://drive.google.com/file/d/1Gytai8M_12J36GY6L_TulEcOC-035jwS/view?usp=drive_link
-https://drive.google.com/file/d/14LJudNc629NT-i8xreXtzl27ce_DxOFJ/view?usp=drive_link
-https://drive.google.com/file/d/1sBvPCODbzxGAI0S3lgN5cSG9Go3lRi00/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_shrimp.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_shrimp.txt
deleted file mode 100644
index a6d76bd767..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_shrimp.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-https://drive.google.com/file/d/1MJn9GbC8p9lN4gC9KDMLEkTkP_gGpXj0/view?usp=drive_link
-https://drive.google.com/file/d/1-4LXgjl7ZCOgp-8GCJmFRD8OeqN5Jf7-/view?usp=drive_link
-https://drive.google.com/file/d/1Ho06Ce0SPbqU3juaMxNUwAt3zCRLGC8W/view?usp=drive_link
-https://drive.google.com/file/d/1ivHoj7_7olBSxH-Y8kqXEW7ttITK-45j/view?usp=drive_link
-https://drive.google.com/file/d/1qjY4hM_IvZ8cq2II_n9MeJbvyeuN4oBP/view?usp=drive_link
-https://drive.google.com/file/d/1rKVhO_f92-7sw13T8hTVrza3B9oAVgoy/view?usp=drive_link
-https://drive.google.com/file/d/1pcLPHO8fBkc1-CRa88tyQtEueE4xiXNi/view?usp=drive_link
-https://drive.google.com/file/d/1Vev_chCsIeEdvQ8poEYNsOJFGy_QU8kZ/view?usp=drive_link
-https://drive.google.com/file/d/1l5G4zpRkxSLCQjvGPYSN4zfCvVRQuzMz/view?usp=drive_link
-https://drive.google.com/file/d/14vgthE1eoakXkr2-DRw50E6lAqYOiUuE/view?usp=drive_link
-https://drive.google.com/file/d/17nPSmKKmgQ2B7zkzWrZYiLM3RBuFod82/view?usp=drive_link
-https://drive.google.com/file/d/1QcDsxplVvb_ID9BVrihl5FvlC-j7waXi/view?usp=drive_link
-https://drive.google.com/file/d/18pEejBpI-eEVaWAAjBCyC0vgbX3T1Esj/view?usp=drive_link
-https://drive.google.com/file/d/1H8eH6_IRODtEFT6WoM77ltR5OoOrqXmI/view?usp=drive_link
-https://drive.google.com/file/d/1IWlpFRZhoxyG4nS13CWK4leZVk5wbNx4/view?usp=drive_link
-https://drive.google.com/file/d/1PbZA8_OCGmMLxNP9xbkLRSChniL4uGxl/view?usp=drive_link
-https://drive.google.com/file/d/1p9XAdmG2f_WeflNO4DIJ_tr1rK6M9B4B/view?usp=drive_link
-https://drive.google.com/file/d/1nS59Et1cNAvKo3Y4SeSGRuZD5TvBbCF3/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_wash_pan.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_wash_pan.txt
deleted file mode 100644
index 5e3732bd7c..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_wash_pan.txt
+++ /dev/null
@@ -1 +0,0 @@
-https://drive.google.com/drive/folders/1S8eFg98IaGAIKVZ8QFWG1bx4mHa-O204
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_wipe_wine.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_wipe_wine.txt
deleted file mode 100644
index 17a13f1afa..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/mobile_wipe_wine.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-https://drive.google.com/drive/folders/1tC_g1AJ8lglBLY-fjsQrG6DMBa3Ucp-0
-https://drive.google.com/file/d/1fG_Yi2MJrFjiUVN3XoiWXLtTxHlwwaDv/view?usp=drive_link
-https://drive.google.com/file/d/1WX32VWfzzX3Blmd06DRxLwFbMJfVe7P4/view?usp=drive_link
-https://drive.google.com/file/d/18onsX3vXg3xkFwP5bVUCjdV4n9TRn0C9/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_insertion_human.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_insertion_human.txt
deleted file mode 100644
index 19bb7114ce..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_insertion_human.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-https://drive.google.com/drive/folders/1RgyD0JgTX30H4IM5XZn8I3zSV_mr8pyF
-https://drive.google.com/file/d/18Cudl6nikDtgRolea7je8iF_gGKzynOP/view?usp=drive_link
-https://drive.google.com/file/d/1C1kZYyROzs-PrLc0SkDgUgMi4-L3lauE/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_insertion_scripted.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_insertion_scripted.txt
deleted file mode 100644
index fc80579b31..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_insertion_scripted.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-https://drive.google.com/drive/folders/1TsojQQSXtHEoGnqgJ3gmpPQR2DPLtS2N
-https://drive.google.com/file/d/1wfMSZ24oOh5KR_0aaP3Cnu_c4ZCveduB/view?usp=drive_link
-https://drive.google.com/file/d/17EuCUWS6uCCr6yyNzpXdcdE-_TTNCKtf/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_transfer_cube_human.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_transfer_cube_human.txt
deleted file mode 100644
index f5161ea275..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_transfer_cube_human.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-https://drive.google.com/drive/folders/1sc-E4QYW7A0o23m1u2VWNGVq5smAsfCo
-https://drive.google.com/file/d/18smMymtr8tIxaNUQ61gW6dG50pt3MvGq/view?usp=drive_link
-https://drive.google.com/file/d/1Nk7l53d9sJoGDBKAOnNrExX5nLacATc6/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_transfer_cube_scripted.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_transfer_cube_scripted.txt
deleted file mode 100644
index d3a5b4141f..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/sim_transfer_cube_scripted.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-https://drive.google.com/drive/folders/1aRyoOhQwxhyt1J8XgEig4s6kzaw__LXj
-https://drive.google.com/file/d/1pnGIOd-E4-rhz2P3VxpknMKRZCoKt6eI/view?usp=drive_link
-https://drive.google.com/file/d/1GKReZHrXU73NMiC5zKCq_UtqPVtYq8eo/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_battery.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_battery.txt
deleted file mode 100644
index a3613eb751..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_battery.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/19qS_n7vKgDcPeTMnvDHQ5-n73xEbJz5D
-https://drive.google.com/file/d/1oC31By0A2bsBeHyUwBdQw1z4ng6yi9Za/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_candy.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_candy.txt
deleted file mode 100644
index a39bde5676..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_candy.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/1m5rQ6UVH8Q9RQp_6c0CxkQ88-L-ScO7q
-https://drive.google.com/file/d/1wHz2qcmwcVG0C0CZ9MjQDQcmj4OY9_a3/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_coffee.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_coffee.txt
deleted file mode 100644
index 3f4acbd074..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_coffee.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/1seQGay470nGQ-knBI5TjsTr8iL9Qws5q
-https://drive.google.com/file/d/1T89hSX5U99wLGvGTE7yUBaQPOpyj6Sai/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_coffee_new.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_coffee_new.txt
deleted file mode 100644
index 06667fefd8..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_coffee_new.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/1t3eDc5Rg0DveyRe8oTm6Dia_FYU5mXyf
-https://drive.google.com/file/d/1TXFaduTakvS0ZWJqKCX-HIvYglum_5CY/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_cups_open.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_cups_open.txt
deleted file mode 100644
index 2cde5fa092..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_cups_open.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/1Z9X3DNzd6LS0FFjQemNUMoMA5yk5VQOh
-https://drive.google.com/file/d/1Wlyc0vTkjXuWB6zbaVOWhEfD7BmPgUV_/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_fork_pick_up.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_fork_pick_up.txt
deleted file mode 100644
index 92b0d474f7..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_fork_pick_up.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-https://drive.google.com/drive/folders/1DYgB4ifX4uIid9m9jnC0Zdz8Nf7ZC0fc
-https://drive.google.com/file/d/1Eb-NRNk_FmVleCbU_Ng5Y4dfcjTKN7Rv/view?usp=drive_link
-https://drive.google.com/file/d/1dkhjEADakT-44l9jf-nK4x89kr4yG_qb/view?usp=drive_link
-https://drive.google.com/file/d/14hDhgcZkVqNExGb4tIXpSjMshhqZETch/view?usp=drive_link
-https://drive.google.com/file/d/1zVMEHpHbuNyP5A_lYU7RPSLB-4V0yfZw/view?usp=drive_link
-https://drive.google.com/file/d/1JtgDjBvy7FnRpFzrx_foC3quorYQFAR-/view?usp=drive_link
-https://drive.google.com/file/d/1EHdneB6F-PP0dQlX8qPaXbxmKoBy_YwO/view?usp=drive_link
-https://drive.google.com/file/d/17Z0jjVBy1OPKREPu77_n_rQzorDiapji/view?usp=drive_link
-https://drive.google.com/file/d/1F4i23qPJ_qTf5jWjfLo4ARGJChznYWt3/view?usp=drive_link
-https://drive.google.com/file/d/1kZtXWM3uS0-rLblydBfJ0mMcVnMMXw9w/view?usp=drive_link
-https://drive.google.com/file/d/1mNODox87xFfY5Z_o5mcLsr8SHb39jDik/view?usp=drive_link
-https://drive.google.com/file/d/1Ob44VdmEUA93FKDECiRb5Ogz2xQg5IWp/view?usp=drive_link
-https://drive.google.com/file/d/1fdQLdjj3Cwv33R1wZhfrLz9Del8mqgHb/view?usp=drive_link
-https://drive.google.com/file/d/1Yu3L3ft21zP__XL8pCfhb788ZleuW1n5/view?usp=drive_link
-https://drive.google.com/file/d/1ozBBWXVZ9hXDh9ooHUNroHdYm8UDqnhJ/view?usp=drive_link
-https://drive.google.com/file/d/1o0TGqvfWw_Lunxb5ubKDS21Lr_WC0h75/view?usp=drive_link
-https://drive.google.com/file/d/1jZnd5eP5L6BH5l98BPN6OnoQx3fu8e9n/view?usp=drive_link
-https://drive.google.com/file/d/1S5sYbz8wcLYp0V67v13i4PRcBxodn4Hg/view?usp=drive_link
-https://drive.google.com/file/d/1rFeg_x6ftJYwPtBv34D3h2L2cpDLeR4G/view?usp=drive_link
-https://drive.google.com/file/d/1GvS3lcm4o6nm_scUk0XxKeVFNmzjucDZ/view?usp=drive_link
-https://drive.google.com/file/d/1-9i0riphC7NhhDahcQfD1QoBXP5gF90A/view?usp=drive_link
-https://drive.google.com/file/d/15p_IqGsMbKuvzMS872THAZr-3SBtb1Fr/view?usp=drive_link
-https://drive.google.com/file/d/1ToyYcBfJL8gbQn0q_59zPLsFmm7dmMJo/view?usp=drive_link
-https://drive.google.com/file/d/1e_7PNH7CYafE4pAebP7ZdI7XFbmEcy_i/view?usp=drive_link
-https://drive.google.com/file/d/1JoabvGVsIQdug2xOhUIhetEIyDM91y_Y/view?usp=drive_link
-https://drive.google.com/file/d/1kOMw1y0lmnVaCjwZICfzCsx6e0Z8MNGR/view?usp=drive_link
-https://drive.google.com/file/d/16it_wd1JOevUQTK2_CvF_pBACTgpIPgM/view?usp=drive_link
-https://drive.google.com/file/d/1IRcCj9HnJSfbyMgr5XEERGlEnWeZQwOc/view?usp=drive_link
-https://drive.google.com/file/d/1Z2dIJfq_S3liGmPN9Rphvkmucnmw7tlb/view?usp=drive_link
-https://drive.google.com/file/d/1J3NoAjzndGx9yNyaBOJHdNny1epzUoBt/view?usp=drive_link
-https://drive.google.com/file/d/18nOvxV1k8FSmBrhT4TPo2sKKSZXougyx/view?usp=drive_link
-https://drive.google.com/file/d/1CT8FxclafFMjSd7gCWVw3VSeryeiF04i/view?usp=drive_link
-https://drive.google.com/file/d/16M9KVqQMFfSsXfypK0bocFft8Nz3j2Rt/view?usp=drive_link
-https://drive.google.com/file/d/18QPVkw6bj6HW8LTPrQLWrrUX4R6RcF42/view?usp=drive_link
-https://drive.google.com/file/d/1hQTVtA5hBTE_StXpJafTZJ3tgt2VQQ_t/view?usp=drive_link
-https://drive.google.com/file/d/1Dn-d5g69H6EgAWgsFdrcbJKtz7ySsCQ8/view?usp=drive_link
-https://drive.google.com/file/d/13hMr16483P7ALYv73yMRUN37fJdVQM62/view?usp=drive_link
-https://drive.google.com/file/d/1848yN3XMN5zJMEgApt6KzrWgfRPfimtv/view?usp=drive_link
-https://drive.google.com/file/d/1oAD9kSnS0fTgj-CjD4u9VdZ5X67IOIMa/view?usp=drive_link
-https://drive.google.com/file/d/1ilzIWLCCG5b_KgF5s0wdN2I5-lFNpwC1/view?usp=drive_link
-https://drive.google.com/file/d/1rjsT2YBjnidxod1s9s-myAYz8boHr-WB/view?usp=drive_link
-https://drive.google.com/file/d/18Gg48HTub15bd8qzbhiCUufbVy0fbN5G/view?usp=drive_link
-https://drive.google.com/file/d/1WsSnQSqmMTVSRwrhT1Y-v782My2zcjLm/view?usp=drive_link
-https://drive.google.com/file/d/1ea9ZCvoyc-xqiFXgeDcA_mOWsw7VUuoi/view?usp=drive_link
-https://drive.google.com/file/d/1wv1v3-XhPgbNzp62BXbJTDzMPu2tlDUc/view?usp=drive_link
-https://drive.google.com/file/d/18-ikzt8LoZ83Gi3goKCELs4U4z8hrRoF/view?usp=drive_link
-https://drive.google.com/file/d/16Bjhp7JNCXkGuLvyNcZowAx3W-Y-15DV/view?usp=drive_link
-https://drive.google.com/file/d/1Gc-KRI-xwcp1fMR55ugbrLg_5y3SPde-/view?usp=drive_link
-https://drive.google.com/file/d/1oP72Q386Z4Sy5MMm-t5yNogIe5Van_9k/view?usp=drive_link
-https://drive.google.com/file/d/112T90eDUDVH-SyOV7UnZl5bscAH2hcfq/view?usp=drive_link
-https://drive.google.com/file/d/1y-uKOesRRhjgDtFbG_j65f4SGg0v8XDg/view?usp=drive_link
-https://drive.google.com/file/d/1LOP05OagoI3km-ZKQBrS204A85UVk7Ok/view?usp=drive_link
-https://drive.google.com/file/d/1QkHQKgasVzWsmdPvkXgGhWyQ84d93_Az/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_pingpong_test.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_pingpong_test.txt
deleted file mode 100644
index c622def62d..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_pingpong_test.txt
+++ /dev/null
@@ -1 +0,0 @@
-https://drive.google.com/drive/folders/1Ut2cv6o6Pkfgg46DgwVUM7Z5PkNG8eJ-
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_pro_pencil.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_pro_pencil.txt
deleted file mode 100644
index bdfc447f5d..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_pro_pencil.txt
+++ /dev/null
@@ -1 +0,0 @@
-https://drive.google.com/drive/folders/1FqxPV0PgvgIu8XFjtvZSPSExuNcxVVAY
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_screw_driver.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_screw_driver.txt
deleted file mode 100644
index fe5548fdd1..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_screw_driver.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/1SKtG0ct9q0nVdYssJNMWSOjikcXliT58
-https://drive.google.com/file/d/1nchD21O30B3i3LDoqramo1zgW5YvpJIN/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_tape.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_tape.txt
deleted file mode 100644
index 46d9547900..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_tape.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/1_4DHf2cma0xsChLQFghwigX6Ukti5-zQ
-https://drive.google.com/file/d/1_8vS4hDNDgUQY-SmekrNaa7dF67QJYU-/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_thread_velcro.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_thread_velcro.txt
deleted file mode 100644
index 46d9547900..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_thread_velcro.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/1_4DHf2cma0xsChLQFghwigX6Ukti5-zQ
-https://drive.google.com/file/d/1_8vS4hDNDgUQY-SmekrNaa7dF67QJYU-/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_towel.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_towel.txt
deleted file mode 100644
index 19288fa5cd..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_towel.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-https://drive.google.com/drive/folders/1fAD7vkyTGTFB_nGXIKofCU1U05oE3MFv
-https://drive.google.com/file/d/1XzyQ2B6LLvcurIonOpEu4nij2qwNWshH/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_vinh_cup.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_vinh_cup.txt
deleted file mode 100644
index 65ec35c4b5..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_vinh_cup.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-https://drive.google.com/drive/folders/13EQsVsnxT86K20QAoyE_YpsFbQ7fZQdu
-https://drive.google.com/file/d/1-W_JHghZG65FNTVhw1SXhtQrazdLL3Ue/view?usp=drive_link
-https://drive.google.com/file/d/1VwRJgdWUo-2nQaNM7Bs77-fsm8iwUxEo/view?usp=drive_link
-https://drive.google.com/file/d/1wFzGRo5iYA13WLi6IV1ry64RyahQBFio/view?usp=drive_link
-https://drive.google.com/file/d/1IKtQzQ-n-UTv64hYpReu2R4cqUvmNQqD/view?usp=drive_link
-https://drive.google.com/file/d/1GicVci9OiuuZZH79i5Mg7AtWod94MzwT/view?usp=drive_link
-https://drive.google.com/file/d/1JVnIoR7EIQp70T4eAf9RX65JcTrzsjQc/view?usp=drive_link
-https://drive.google.com/file/d/1W2xr4h23ucjPrc-mBEeqnACsfaImpc0p/view?usp=drive_link
-https://drive.google.com/file/d/10xj_0V7A07o3uCa7v5omUrTC0YlPW8H3/view?usp=drive_link
-https://drive.google.com/file/d/1FOc3EMaCy8Mb0_a7PuXLAwKwvxkbKmwU/view?usp=drive_link
-https://drive.google.com/file/d/143PgDXBcf2GQ0Q07ZPMVMfBgZDd5sLJG/view?usp=drive_link
-https://drive.google.com/file/d/1pE5Tyj0LlGbGWvUzuhixp86Ibu55Ez3I/view?usp=drive_link
-https://drive.google.com/file/d/141668b1VzX80ncrVJPzhkoAeIFB4MEK9/view?usp=drive_link
-https://drive.google.com/file/d/1bw12lo37p1ZvRvErHsll7cEYi2OxscvZ/view?usp=drive_link
-https://drive.google.com/file/d/1zfnMFvbgBjl6SzYhksbaOzfbwLrCN6tb/view?usp=drive_link
-https://drive.google.com/file/d/1-GIszA6mUJMaNB-tdh9r9skc77SWA0VX/view?usp=drive_link
-https://drive.google.com/file/d/1fTB0zWFYU6zh4IIUFT2zX_OkwYqmElwY/view?usp=drive_link
-https://drive.google.com/file/d/1gPIPNKGmrO9c7gKF7SP0SuUYbIBBq8z1/view?usp=drive_link
-https://drive.google.com/file/d/12JeJ-dQd5lYyn6PlDOGdE-ChVeiZ-Uv0/view?usp=drive_link
-https://drive.google.com/file/d/100_20cgCqerU6qoh3TfTbwLy9mlDAFEG/view?usp=drive_link
-https://drive.google.com/file/d/111oAGJ76ku_pYgbBoIdZAC1_XEQcPI__/view?usp=drive_link
-https://drive.google.com/file/d/1UhC8L-354ZQ2gblPFGI35EMsVwfpuKa0/view?usp=drive_link
-https://drive.google.com/file/d/1sIXQSgUR_xdrNtGrL6QGBnkLMKErsIp1/view?usp=drive_link
-https://drive.google.com/file/d/16Ax77bDSIXnsn4GFL8XYKKT1P6bPpfMd/view?usp=drive_link
-https://drive.google.com/file/d/1pgRVYwwVIsWq_qsWqZpe1UBzZfF5Fa9D/view?usp=drive_link
-https://drive.google.com/file/d/1jtimaZkWsY1P5gC2bbS64H_WCUU7HXN2/view?usp=drive_link
-https://drive.google.com/file/d/1N6Bh02P-RiTEgtx1YH1Db_X3TGpP-X_r/view?usp=drive_link
-https://drive.google.com/file/d/14Fy8EwJ8d9Vh97Yt1VOvUChSCrfIjBij/view?usp=drive_link
-https://drive.google.com/file/d/1IRuv42dvIMPuKhcMZmuXaBjJ-lPFOmQd/view?usp=drive_link
-https://drive.google.com/file/d/16XWzNY2D8ucVVn5geBgsVdhm3ppO4que/view?usp=drive_link
-https://drive.google.com/file/d/1xsVOoQgthK_L_SDrmq_JvQgUpAvPEAY8/view?usp=drive_link
-https://drive.google.com/file/d/1bZbw66DyEMvnJnzkdUUNbKjvNKg8KFYM/view?usp=drive_link
-https://drive.google.com/file/d/1CyTVkdrNGGpouCXr4CfhKbMzE6Ah3oo3/view?usp=drive_link
-https://drive.google.com/file/d/1hDRyeM-XEDpHXpptbT8LvNnlQUR3PWOh/view?usp=drive_link
-https://drive.google.com/file/d/1XhHWxbra8Iy5irQZ83IvxwaJqHq9x4s1/view?usp=drive_link
-https://drive.google.com/file/d/1haZcn6aM1o4JlmP9tJj3x2enrxiPaDSD/view?usp=drive_link
-https://drive.google.com/file/d/1ypDyuUTbljaBZ34f-t7lj3O_0bRmyX2n/view?usp=drive_link
-https://drive.google.com/file/d/1ILEEZo_tA9_ChIAprr2mPaNVKZi5vXsO/view?usp=drive_link
-https://drive.google.com/file/d/1U7nVYFaGE8vVTfLCW33D74xOjDcqfgyJ/view?usp=drive_link
-https://drive.google.com/file/d/1rZ93_rmCov5SMDxPkfM3qthcRELZrQX6/view?usp=drive_link
-https://drive.google.com/file/d/1mYO1b_csddtyE3qT6cwLiw-m2w2_1Lxh/view?usp=drive_link
-https://drive.google.com/file/d/1xz7Q5x2jikY8wJQjMRQpRws6AnfWlHm5/view?usp=drive_link
-https://drive.google.com/file/d/1OO8GaO-0FrSZRd1kxMYwBmubyiLOWnbl/view?usp=drive_link
-https://drive.google.com/file/d/1EXn4NVDmf-4_HCy34mYwT-vwK2CFI9ev/view?usp=drive_link
-https://drive.google.com/file/d/10hH70XhXRL9C5SnAG4toHtfHqfJUJo4H/view?usp=drive_link
-https://drive.google.com/file/d/18tiBcxea0guUai4lwsXQvt0q2LZ8ZnnJ/view?usp=drive_link
-https://drive.google.com/file/d/1Q8R8qv37vk5PQ5kQ2ibx6BFLOySD0VpX/view?usp=drive_link
-https://drive.google.com/file/d/17aNriHzjhdibCyuUjQoMFZqjybJZtggG/view?usp=drive_link
-https://drive.google.com/file/d/1LVjEYHSdeKm6CotU1QguIeNEPaIaFl_1/view?usp=drive_link
-https://drive.google.com/file/d/1ufAhE_EkgJ85slg2EW8aW_grOzE_Lmxd/view?usp=drive_link
-https://drive.google.com/file/d/1wtzLtXrkw9eXRGESTPIOlpl1tInu-b2m/view?usp=drive_link
-https://drive.google.com/file/d/1Mk5qvVtD_QHwGOUApRq76TUw2T5THu6f/view?usp=drive_link
-https://drive.google.com/file/d/1y1WQ3hboWVJ68KEYQQ3OhreGuaUpSgwc/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_vinh_cup_left.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_vinh_cup_left.txt
deleted file mode 100644
index 8823a9b594..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_vinh_cup_left.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-https://drive.google.com/drive/folders/1dxWh6YFZUDt6qXIoxgD9bla3CiFjZ11C
-https://drive.google.com/file/d/1hNBJN00SCAlOl0ZEgm7RRGbAGDjyBs0p/view?usp=drive_link
-https://drive.google.com/file/d/17He0CVwXGeoMmXg4SHKo-osNn7YPKVL7/view?usp=drive_link
-https://drive.google.com/file/d/1laNKUVID1x2CV6a2O2WQjwFewKu4lidL/view?usp=drive_link
-https://drive.google.com/file/d/1pNf36xbZJGRArYLmNAvRj5y6CoqdC6kB/view?usp=drive_link
-https://drive.google.com/file/d/1_4E1-y3JXk5I0ebycLYM70YDPK9g52gZ/view?usp=drive_link
-https://drive.google.com/file/d/1PHfzhGPdbolKyOpS3FnR2w7Q8zUlJXSk/view?usp=drive_link
-https://drive.google.com/file/d/17ls2PPN-Pi3tEuK059cwV2_iDT8aGhOO/view?usp=drive_link
-https://drive.google.com/file/d/1LWsg6PmCT00Kv_N_slrmcwKmQPGoBT3k/view?usp=drive_link
-https://drive.google.com/file/d/12LckrchoHTUVH7rxi8J7zD9dA19GXvoW/view?usp=drive_link
-https://drive.google.com/file/d/1VqrJKjAIkj5gtFXL69grdSeu9CyaqnSw/view?usp=drive_link
-https://drive.google.com/file/d/1g5rQYDBZvW-kUtYPeyF3qmd53v6k7kXu/view?usp=drive_link
-https://drive.google.com/file/d/10kUgaSJ0TS7teaG83G3Rf_DG4XGrBt6A/view?usp=drive_link
-https://drive.google.com/file/d/1je9XmneZQZvTma5adMJICUPDovW3ppei/view?usp=drive_link
-https://drive.google.com/file/d/1v28r6bedwZGbUPVVTVImXhK-42XdtGfj/view?usp=drive_link
-https://drive.google.com/file/d/1-TEEx9sGVvzMMaNXYfQMtY2JJ6cvl0dT/view?usp=drive_link
-https://drive.google.com/file/d/1YdBKdJFP9rJWBUX7qrOYL_gfUA8o6J9M/view?usp=drive_link
-https://drive.google.com/file/d/1X9vffwQHNUSKLXr2RlYNtbWDIFCIDfdF/view?usp=drive_link
-https://drive.google.com/file/d/11hqesqa5kvEe5FABUnZRcvmOhR373cYM/view?usp=drive_link
-https://drive.google.com/file/d/1ltTTECjEcbQPgS3UPRgMzaE2x9n6H7dC/view?usp=drive_link
-https://drive.google.com/file/d/1Zxqfa29JdwT-bfMpivi6IG2vz34d21dD/view?usp=drive_link
-https://drive.google.com/file/d/11LQlVxS5hz494dYUJ_PNRPx2NHIJbQns/view?usp=drive_link
-https://drive.google.com/file/d/1i1JhNtnZpO_E8rAv8gxBP3ZTZRvcvsZi/view?usp=drive_link
-https://drive.google.com/file/d/11jOXAr2EULUO4Qkm748634lg4UUFho5U/view?usp=drive_link
-https://drive.google.com/file/d/1rj67wur8DdB_Pipwx24bY43xu4X1eQ5e/view?usp=drive_link
-https://drive.google.com/file/d/15ZTm6lO6f_JQy_4SNfrOu3iPYn1Ro8mh/view?usp=drive_link
-https://drive.google.com/file/d/1q4gBtqWPJtCwXEvknGgN0WHGp7Vfn1b9/view?usp=drive_link
-https://drive.google.com/file/d/1t17keyre47AYqm8GgXiQ7EcvcUkeSiDQ/view?usp=drive_link
-https://drive.google.com/file/d/1OYUPGxtZgOF86Ng_BEOTXm_XOYpuQPsO/view?usp=drive_link
-https://drive.google.com/file/d/1cBjbGHi3dwWHtx6r9EQJi0JT_CE3LuHt/view?usp=drive_link
-https://drive.google.com/file/d/14qaMyF0mcbCB-fCYKNyo5_2NahSC6D5u/view?usp=drive_link
-https://drive.google.com/file/d/12FgX86eA7Y5co9ULBVK80XMsiKQSs-Ri/view?usp=drive_link
-https://drive.google.com/file/d/1yvoHWidf-jdBVw6qCCXOFfkVwKj_2hPk/view?usp=drive_link
-https://drive.google.com/file/d/1a2SugsSDlC8UtUrFzp-_KAwyZckQOvdQ/view?usp=drive_link
-https://drive.google.com/file/d/1l8pILBFSAosypWJMza2K09Vm7rug9axm/view?usp=drive_link
-https://drive.google.com/file/d/1hfPQ8dBCk97PnOhq6_MIISm3IEzcOxJG/view?usp=drive_link
-https://drive.google.com/file/d/1PPAUwlJCFKpms8cqF_k1v2_fCgDBOc3S/view?usp=drive_link
-https://drive.google.com/file/d/1lVKQZeqFfK3amEmLuFhYLUFQ2eyE8rOW/view?usp=drive_link
-https://drive.google.com/file/d/1K9iPMLfDowcIFoyzpvgn88dQ6x6kVwNG/view?usp=drive_link
-https://drive.google.com/file/d/1PNvMqG9tL7QxeLaYBGHiWYR6SYb5iIct/view?usp=drive_link
-https://drive.google.com/file/d/1xkRtzbvIkUsylx9hrFLGQsJn0h1EYu-5/view?usp=drive_link
-https://drive.google.com/file/d/1nxMRrJlSayjDIfr5CmHO1NzAw3COhsLi/view?usp=drive_link
-https://drive.google.com/file/d/1Qs3WEyMGrmagiHIkkFEueWNnJhkUeR1s/view?usp=drive_link
-https://drive.google.com/file/d/1D-G2_Q0SS3M8zyJbg_XzkF2ANPw1HTuX/view?usp=drive_link
-https://drive.google.com/file/d/1mdmJsDGO-YtJAOF_yPKl6lq4PJOIbQhT/view?usp=drive_link
-https://drive.google.com/file/d/11m9bwfop_sPmnQr_8amB6EEsrbAeG_z5/view?usp=drive_link
-https://drive.google.com/file/d/19tyYt5FMn5kru0g9o2nMJhKPnsDqkIZv/view?usp=drive_link
-https://drive.google.com/file/d/1XvTpUdsVTZ-vydvdYYmynbma--HfUGSl/view?usp=drive_link
-https://drive.google.com/file/d/1MO3hFu68J6NohTzr9aB_fY02VA6QSOqj/view?usp=drive_link
-https://drive.google.com/file/d/1Lh-UjwAk__04YOTWINF_QGVU8SjetVaY/view?usp=drive_link
-https://drive.google.com/file/d/1jkSOUwZV5GJ7rZlVeErjcu0DBQs8Np0d/view?usp=drive_link
-https://drive.google.com/file/d/1VIN1eLI-93WrVQwCjsv6XQr353DqqBYA/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_ziploc_slide.txt b/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_ziploc_slide.txt
deleted file mode 100644
index 5db6ed9538..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_aloha_raw_urls/static_ziploc_slide.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-https://drive.google.com/drive/folders/1EgKar7rWBmTIRmeJYZciSwjZx3uP2mHO
-https://drive.google.com/file/d/12eYWQO15atK2hBjXhynPJd9MKAj_42pz/view?usp=drive_link
-https://drive.google.com/file/d/1Ul4oEeICJDjgfYTl4H1uaisTzVYIM6wd/view?usp=drive_link
-https://drive.google.com/file/d/1WSF-OG8lKSe2wVYCv5D1aJNipxpgddk-/view?usp=drive_link
-https://drive.google.com/file/d/1_ppD5j5sFh26aWW0JmhLzJMeNB-lCArk/view?usp=drive_link
-https://drive.google.com/file/d/1WUp846dgWXYhu4oJfhHxiU6YL_7N6s4W/view?usp=drive_link
-https://drive.google.com/file/d/1HRZNAIoAQw_uYiPwnBvtBioQoqiqoXdA/view?usp=drive_link
-https://drive.google.com/file/d/1hedGq-QDMnIn8GlXXBC3GiEJ_Y-LTxyt/view?usp=drive_link
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_diffusion_policy_replay_buffer.py b/lerobot/common/datasets/push_dataset_to_hub/_diffusion_policy_replay_buffer.py
deleted file mode 100644
index 33b4c9745d..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_diffusion_policy_replay_buffer.py
+++ /dev/null
@@ -1,634 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Helper code for loading PushT dataset from Diffusion Policy (https://diffusion-policy.cs.columbia.edu/)
-
-Copied from the original Diffusion Policy repository and used in our `download_and_upload_dataset.py` script.
-"""
-
-from __future__ import annotations
-
-import math
-import numbers
-import os
-from functools import cached_property
-
-import numcodecs
-import numpy as np
-import zarr
-
-
-def check_chunks_compatible(chunks: tuple, shape: tuple):
- assert len(shape) == len(chunks)
- for c in chunks:
- assert isinstance(c, numbers.Integral)
- assert c > 0
-
-
-def rechunk_recompress_array(group, name, chunks=None, chunk_length=None, compressor=None, tmp_key="_temp"):
- old_arr = group[name]
- if chunks is None:
- chunks = (chunk_length,) + old_arr.chunks[1:] if chunk_length is not None else old_arr.chunks
- check_chunks_compatible(chunks, old_arr.shape)
-
- if compressor is None:
- compressor = old_arr.compressor
-
- if (chunks == old_arr.chunks) and (compressor == old_arr.compressor):
- # no change
- return old_arr
-
- # rechunk recompress
- group.move(name, tmp_key)
- old_arr = group[tmp_key]
- n_copied, n_skipped, n_bytes_copied = zarr.copy(
- source=old_arr,
- dest=group,
- name=name,
- chunks=chunks,
- compressor=compressor,
- )
- del group[tmp_key]
- arr = group[name]
- return arr
-
-
-def get_optimal_chunks(shape, dtype, target_chunk_bytes=2e6, max_chunk_length=None):
- """
- Common shapes
- T,D
- T,N,D
- T,H,W,C
- T,N,H,W,C
- """
- itemsize = np.dtype(dtype).itemsize
- # reversed
- rshape = list(shape[::-1])
- if max_chunk_length is not None:
- rshape[-1] = int(max_chunk_length)
- split_idx = len(shape) - 1
- for i in range(len(shape) - 1):
- this_chunk_bytes = itemsize * np.prod(rshape[:i])
- next_chunk_bytes = itemsize * np.prod(rshape[: i + 1])
- if this_chunk_bytes <= target_chunk_bytes and next_chunk_bytes > target_chunk_bytes:
- split_idx = i
-
- rchunks = rshape[:split_idx]
- item_chunk_bytes = itemsize * np.prod(rshape[:split_idx])
- this_max_chunk_length = rshape[split_idx]
- next_chunk_length = min(this_max_chunk_length, math.ceil(target_chunk_bytes / item_chunk_bytes))
- rchunks.append(next_chunk_length)
- len_diff = len(shape) - len(rchunks)
- rchunks.extend([1] * len_diff)
- chunks = tuple(rchunks[::-1])
- # print(np.prod(chunks) * itemsize / target_chunk_bytes)
- return chunks
-
-
-class ReplayBuffer:
- """
-    Zarr-based temporal data structure.
-    Assumes the first dimension is time. Only chunks along the time dimension.
- """
-
- def __init__(self, root: zarr.Group | dict[str, dict]):
- """
- Dummy constructor. Use copy_from* and create_from* class methods instead.
- """
- assert "data" in root
- assert "meta" in root
- assert "episode_ends" in root["meta"]
- for value in root["data"].values():
- assert value.shape[0] == root["meta"]["episode_ends"][-1]
- self.root = root
-
- # ============= create constructors ===============
- @classmethod
- def create_empty_zarr(cls, storage=None, root=None):
- if root is None:
- if storage is None:
- storage = zarr.MemoryStore()
- root = zarr.group(store=storage)
- root.require_group("data", overwrite=False)
- meta = root.require_group("meta", overwrite=False)
- if "episode_ends" not in meta:
- meta.zeros("episode_ends", shape=(0,), dtype=np.int64, compressor=None, overwrite=False)
- return cls(root=root)
-
- @classmethod
- def create_empty_numpy(cls):
- root = {"data": {}, "meta": {"episode_ends": np.zeros((0,), dtype=np.int64)}}
- return cls(root=root)
-
- @classmethod
- def create_from_group(cls, group, **kwargs):
- if "data" not in group:
-            # create from scratch
- buffer = cls.create_empty_zarr(root=group, **kwargs)
- else:
-            # already exists
- buffer = cls(root=group, **kwargs)
- return buffer
-
- @classmethod
- def create_from_path(cls, zarr_path, mode="r", **kwargs):
- """
-        Open an on-disk zarr directly (for datasets larger than memory).
- Slower.
- """
- group = zarr.open(os.path.expanduser(zarr_path), mode)
- return cls.create_from_group(group, **kwargs)
-
- # ============= copy constructors ===============
- @classmethod
- def copy_from_store(
- cls,
- src_store,
- store=None,
- keys=None,
- chunks: dict[str, tuple] | None = None,
- compressors: dict | str | numcodecs.abc.Codec | None = None,
- if_exists="replace",
- **kwargs,
- ):
- """
-        Load into memory.
- """
- src_root = zarr.group(src_store)
- if chunks is None:
- chunks = {}
- if compressors is None:
- compressors = {}
- root = None
- if store is None:
- # numpy backend
- meta = {}
- for key, value in src_root["meta"].items():
- if len(value.shape) == 0:
- meta[key] = np.array(value)
- else:
- meta[key] = value[:]
-
- if keys is None:
- keys = src_root["data"].keys()
- data = {}
- for key in keys:
- arr = src_root["data"][key]
- data[key] = arr[:]
-
- root = {"meta": meta, "data": data}
- else:
- root = zarr.group(store=store)
- # copy without recompression
- n_copied, n_skipped, n_bytes_copied = zarr.copy_store(
- source=src_store, dest=store, source_path="/meta", dest_path="/meta", if_exists=if_exists
- )
- data_group = root.create_group("data", overwrite=True)
- if keys is None:
- keys = src_root["data"].keys()
- for key in keys:
- value = src_root["data"][key]
- cks = cls._resolve_array_chunks(chunks=chunks, key=key, array=value)
- cpr = cls._resolve_array_compressor(compressors=compressors, key=key, array=value)
- if cks == value.chunks and cpr == value.compressor:
- # copy without recompression
- this_path = "/data/" + key
- n_copied, n_skipped, n_bytes_copied = zarr.copy_store(
- source=src_store,
- dest=store,
- source_path=this_path,
- dest_path=this_path,
- if_exists=if_exists,
- )
- else:
- # copy with recompression
- n_copied, n_skipped, n_bytes_copied = zarr.copy(
- source=value,
- dest=data_group,
- name=key,
- chunks=cks,
- compressor=cpr,
- if_exists=if_exists,
- )
- buffer = cls(root=root)
- return buffer
-
- @classmethod
- def copy_from_path(
- cls,
- zarr_path,
- backend=None,
- store=None,
- keys=None,
- chunks: dict[str, tuple] | None = None,
- compressors: dict | str | numcodecs.abc.Codec | None = None,
- if_exists="replace",
- **kwargs,
- ):
- """
-        Copy an on-disk zarr into a compressed in-memory copy.
-        Recommended.
- """
- if chunks is None:
- chunks = {}
- if compressors is None:
- compressors = {}
- if backend == "numpy":
- print("backend argument is deprecated!")
- store = None
- group = zarr.open(os.path.expanduser(zarr_path), "r")
- return cls.copy_from_store(
- src_store=group.store,
- store=store,
- keys=keys,
- chunks=chunks,
- compressors=compressors,
- if_exists=if_exists,
- **kwargs,
- )
-
- # ============= save methods ===============
- def save_to_store(
- self,
- store,
- chunks: dict[str, tuple] | None = None,
- compressors: str | numcodecs.abc.Codec | dict | None = None,
- if_exists="replace",
- **kwargs,
- ):
- root = zarr.group(store)
- if chunks is None:
- chunks = {}
- if compressors is None:
- compressors = {}
- if self.backend == "zarr":
- # recompression free copy
- n_copied, n_skipped, n_bytes_copied = zarr.copy_store(
- source=self.root.store,
- dest=store,
- source_path="/meta",
- dest_path="/meta",
- if_exists=if_exists,
- )
- else:
- meta_group = root.create_group("meta", overwrite=True)
- # save meta, no chunking
- for key, value in self.root["meta"].items():
- _ = meta_group.array(name=key, data=value, shape=value.shape, chunks=value.shape)
-
- # save data, chunk
- data_group = root.create_group("data", overwrite=True)
- for key, value in self.root["data"].items():
- cks = self._resolve_array_chunks(chunks=chunks, key=key, array=value)
- cpr = self._resolve_array_compressor(compressors=compressors, key=key, array=value)
- if isinstance(value, zarr.Array):
- if cks == value.chunks and cpr == value.compressor:
- # copy without recompression
- this_path = "/data/" + key
- n_copied, n_skipped, n_bytes_copied = zarr.copy_store(
- source=self.root.store,
- dest=store,
- source_path=this_path,
- dest_path=this_path,
- if_exists=if_exists,
- )
- else:
- # copy with recompression
- n_copied, n_skipped, n_bytes_copied = zarr.copy(
- source=value,
- dest=data_group,
- name=key,
- chunks=cks,
- compressor=cpr,
- if_exists=if_exists,
- )
- else:
- # numpy
- _ = data_group.array(name=key, data=value, chunks=cks, compressor=cpr)
- return store
-
- def save_to_path(
- self,
- zarr_path,
- chunks: dict[str, tuple] | None = None,
- compressors: str | numcodecs.abc.Codec | dict | None = None,
- if_exists="replace",
- **kwargs,
- ):
- if chunks is None:
- chunks = {}
- if compressors is None:
- compressors = {}
- store = zarr.DirectoryStore(os.path.expanduser(zarr_path))
- return self.save_to_store(
- store, chunks=chunks, compressors=compressors, if_exists=if_exists, **kwargs
- )
-
- @staticmethod
- def resolve_compressor(compressor="default"):
- if compressor == "default":
- compressor = numcodecs.Blosc(cname="lz4", clevel=5, shuffle=numcodecs.Blosc.NOSHUFFLE)
- elif compressor == "disk":
- compressor = numcodecs.Blosc("zstd", clevel=5, shuffle=numcodecs.Blosc.BITSHUFFLE)
- return compressor
-
- @classmethod
- def _resolve_array_compressor(cls, compressors: dict | str | numcodecs.abc.Codec, key, array):
- # allows compressor to be explicitly set to None
- cpr = "nil"
- if isinstance(compressors, dict):
- if key in compressors:
- cpr = cls.resolve_compressor(compressors[key])
- elif isinstance(array, zarr.Array):
- cpr = array.compressor
- else:
- cpr = cls.resolve_compressor(compressors)
- # backup default
- if cpr == "nil":
- cpr = cls.resolve_compressor("default")
- return cpr
-
- @classmethod
- def _resolve_array_chunks(cls, chunks: dict | tuple, key, array):
- cks = None
- if isinstance(chunks, dict):
- if key in chunks:
- cks = chunks[key]
- elif isinstance(array, zarr.Array):
- cks = array.chunks
- elif isinstance(chunks, tuple):
- cks = chunks
- else:
- raise TypeError(f"Unsupported chunks type {type(chunks)}")
- # backup default
- if cks is None:
- cks = get_optimal_chunks(shape=array.shape, dtype=array.dtype)
- # check
- check_chunks_compatible(chunks=cks, shape=array.shape)
- return cks
-
- # ============= properties =================
- @cached_property
- def data(self):
- return self.root["data"]
-
- @cached_property
- def meta(self):
- return self.root["meta"]
-
- def update_meta(self, data):
- # sanitize data
- np_data = {}
- for key, value in data.items():
- if isinstance(value, np.ndarray):
- np_data[key] = value
- else:
- arr = np.array(value)
- if arr.dtype == object:
- raise TypeError(f"Invalid value type {type(value)}")
- np_data[key] = arr
-
- meta_group = self.meta
- if self.backend == "zarr":
- for key, value in np_data.items():
- _ = meta_group.array(
- name=key, data=value, shape=value.shape, chunks=value.shape, overwrite=True
- )
- else:
- meta_group.update(np_data)
-
- return meta_group
-
- @property
- def episode_ends(self):
- return self.meta["episode_ends"]
-
- def get_episode_idxs(self):
- import numba
-
-        # apply the numba JIT so the per-step loop below is compiled
-        @numba.jit(nopython=True)
-        def _get_episode_idxs(episode_ends):
- result = np.zeros((episode_ends[-1],), dtype=np.int64)
- for i in range(len(episode_ends)):
- start = 0
- if i > 0:
- start = episode_ends[i - 1]
- end = episode_ends[i]
- for idx in range(start, end):
- result[idx] = i
- return result
-
-        return _get_episode_idxs(np.asarray(self.episode_ends))
-
- @property
- def backend(self):
- backend = "numpy"
- if isinstance(self.root, zarr.Group):
- backend = "zarr"
- return backend
-
- # =========== dict-like API ==============
- def __repr__(self) -> str:
- if self.backend == "zarr":
- return str(self.root.tree())
- else:
- return super().__repr__()
-
- def keys(self):
- return self.data.keys()
-
- def values(self):
- return self.data.values()
-
- def items(self):
- return self.data.items()
-
- def __getitem__(self, key):
- return self.data[key]
-
- def __contains__(self, key):
- return key in self.data
-
- # =========== our API ==============
- @property
- def n_steps(self):
- if len(self.episode_ends) == 0:
- return 0
- return self.episode_ends[-1]
-
- @property
- def n_episodes(self):
- return len(self.episode_ends)
-
- @property
- def chunk_size(self):
- if self.backend == "zarr":
- return next(iter(self.data.arrays()))[-1].chunks[0]
- return None
-
- @property
- def episode_lengths(self):
- ends = self.episode_ends[:]
- ends = np.insert(ends, 0, 0)
- lengths = np.diff(ends)
- return lengths
-
- def add_episode(
- self,
- data: dict[str, np.ndarray],
- chunks: dict[str, tuple] | None = None,
- compressors: str | numcodecs.abc.Codec | dict | None = None,
- ):
- if chunks is None:
- chunks = {}
- if compressors is None:
- compressors = {}
- assert len(data) > 0
- is_zarr = self.backend == "zarr"
-
- curr_len = self.n_steps
- episode_length = None
- for value in data.values():
- assert len(value.shape) >= 1
- if episode_length is None:
- episode_length = len(value)
- else:
- assert episode_length == len(value)
- new_len = curr_len + episode_length
-
- for key, value in data.items():
- new_shape = (new_len,) + value.shape[1:]
- # create array
- if key not in self.data:
- if is_zarr:
- cks = self._resolve_array_chunks(chunks=chunks, key=key, array=value)
- cpr = self._resolve_array_compressor(compressors=compressors, key=key, array=value)
- arr = self.data.zeros(
- name=key, shape=new_shape, chunks=cks, dtype=value.dtype, compressor=cpr
- )
- else:
-                    # allocate a new array so the caller's data is not modified in place
- arr = np.zeros(shape=new_shape, dtype=value.dtype)
- self.data[key] = arr
- else:
- arr = self.data[key]
- assert value.shape[1:] == arr.shape[1:]
- # same method for both zarr and numpy
- if is_zarr:
- arr.resize(new_shape)
- else:
- arr.resize(new_shape, refcheck=False)
- # copy data
- arr[-value.shape[0] :] = value
-
- # append to episode ends
- episode_ends = self.episode_ends
- if is_zarr:
- episode_ends.resize(episode_ends.shape[0] + 1)
- else:
- episode_ends.resize(episode_ends.shape[0] + 1, refcheck=False)
- episode_ends[-1] = new_len
-
- # rechunk
- if is_zarr and episode_ends.chunks[0] < episode_ends.shape[0]:
- rechunk_recompress_array(self.meta, "episode_ends", chunk_length=int(episode_ends.shape[0] * 1.5))
-
- def drop_episode(self):
- is_zarr = self.backend == "zarr"
- episode_ends = self.episode_ends[:].copy()
- assert len(episode_ends) > 0
- start_idx = 0
- if len(episode_ends) > 1:
- start_idx = episode_ends[-2]
- for value in self.data.values():
- new_shape = (start_idx,) + value.shape[1:]
- if is_zarr:
- value.resize(new_shape)
- else:
- value.resize(new_shape, refcheck=False)
- if is_zarr:
- self.episode_ends.resize(len(episode_ends) - 1)
- else:
- self.episode_ends.resize(len(episode_ends) - 1, refcheck=False)
-
- def pop_episode(self):
- assert self.n_episodes > 0
- episode = self.get_episode(self.n_episodes - 1, copy=True)
- self.drop_episode()
- return episode
-
- def extend(self, data):
- self.add_episode(data)
-
- def get_episode(self, idx, copy=False):
- idx = list(range(len(self.episode_ends)))[idx]
- start_idx = 0
- if idx > 0:
- start_idx = self.episode_ends[idx - 1]
- end_idx = self.episode_ends[idx]
- result = self.get_steps_slice(start_idx, end_idx, copy=copy)
- return result
-
- def get_episode_slice(self, idx):
- start_idx = 0
- if idx > 0:
- start_idx = self.episode_ends[idx - 1]
- end_idx = self.episode_ends[idx]
- return slice(start_idx, end_idx)
-
- def get_steps_slice(self, start, stop, step=None, copy=False):
- _slice = slice(start, stop, step)
-
- result = {}
- for key, value in self.data.items():
- x = value[_slice]
- if copy and isinstance(value, np.ndarray):
- x = x.copy()
- result[key] = x
- return result
-
- # =========== chunking =============
- def get_chunks(self) -> dict:
- assert self.backend == "zarr"
- chunks = {}
- for key, value in self.data.items():
- chunks[key] = value.chunks
- return chunks
-
- def set_chunks(self, chunks: dict):
- assert self.backend == "zarr"
- for key, value in chunks.items():
- if key in self.data:
- arr = self.data[key]
- if value != arr.chunks:
- check_chunks_compatible(chunks=value, shape=arr.shape)
- rechunk_recompress_array(self.data, key, chunks=value)
-
- def get_compressors(self) -> dict:
- assert self.backend == "zarr"
- compressors = {}
- for key, value in self.data.items():
- compressors[key] = value.compressor
- return compressors
-
- def set_compressors(self, compressors: dict):
- assert self.backend == "zarr"
- for key, value in compressors.items():
- if key in self.data:
- arr = self.data[key]
- compressor = self.resolve_compressor(value)
- if compressor != arr.compressor:
- rechunk_recompress_array(self.data, key, compressor=compressor)
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_download_raw.py b/lerobot/common/datasets/push_dataset_to_hub/_download_raw.py
deleted file mode 100644
index edeaf0933f..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_download_raw.py
+++ /dev/null
@@ -1,202 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-This file contains download scripts for raw datasets.
-
-Example usage:
-```
-python lerobot/common/datasets/push_dataset_to_hub/_download_raw.py \
---raw-dir data/lerobot-raw/pusht_raw \
---repo-id lerobot-raw/pusht_raw
-```
-"""
-
-import argparse
-import logging
-import warnings
-from pathlib import Path
-
-from huggingface_hub import snapshot_download
-
-from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id
-
-# {raw_repo_id: raw_format}
-AVAILABLE_RAW_REPO_IDS = {
- "lerobot-raw/aloha_mobile_cabinet_raw": "aloha_hdf5",
- "lerobot-raw/aloha_mobile_chair_raw": "aloha_hdf5",
- "lerobot-raw/aloha_mobile_elevator_raw": "aloha_hdf5",
- "lerobot-raw/aloha_mobile_shrimp_raw": "aloha_hdf5",
- "lerobot-raw/aloha_mobile_wash_pan_raw": "aloha_hdf5",
- "lerobot-raw/aloha_mobile_wipe_wine_raw": "aloha_hdf5",
- "lerobot-raw/aloha_sim_insertion_human_raw": "aloha_hdf5",
- "lerobot-raw/aloha_sim_insertion_scripted_raw": "aloha_hdf5",
- "lerobot-raw/aloha_sim_transfer_cube_human_raw": "aloha_hdf5",
- "lerobot-raw/aloha_sim_transfer_cube_scripted_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_battery_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_candy_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_coffee_new_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_coffee_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_cups_open_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_fork_pick_up_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_pingpong_test_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_pro_pencil_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_screw_driver_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_tape_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_thread_velcro_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_towel_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_vinh_cup_left_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_vinh_cup_raw": "aloha_hdf5",
- "lerobot-raw/aloha_static_ziploc_slide_raw": "aloha_hdf5",
- "lerobot-raw/umi_cup_in_the_wild_raw": "umi_zarr",
- "lerobot-raw/pusht_raw": "pusht_zarr",
- "lerobot-raw/unitreeh1_fold_clothes_raw": "aloha_hdf5",
- "lerobot-raw/unitreeh1_rearrange_objects_raw": "aloha_hdf5",
- "lerobot-raw/unitreeh1_two_robot_greeting_raw": "aloha_hdf5",
- "lerobot-raw/unitreeh1_warehouse_raw": "aloha_hdf5",
- "lerobot-raw/xarm_lift_medium_raw": "xarm_pkl",
- "lerobot-raw/xarm_lift_medium_replay_raw": "xarm_pkl",
- "lerobot-raw/xarm_push_medium_raw": "xarm_pkl",
- "lerobot-raw/xarm_push_medium_replay_raw": "xarm_pkl",
- "lerobot-raw/fractal20220817_data_raw": "openx_rlds.fractal20220817_data",
- "lerobot-raw/kuka_raw": "openx_rlds.kuka",
- "lerobot-raw/bridge_openx_raw": "openx_rlds.bridge_openx",
- "lerobot-raw/taco_play_raw": "openx_rlds.taco_play",
- "lerobot-raw/jaco_play_raw": "openx_rlds.jaco_play",
- "lerobot-raw/berkeley_cable_routing_raw": "openx_rlds.berkeley_cable_routing",
- "lerobot-raw/roboturk_raw": "openx_rlds.roboturk",
- "lerobot-raw/nyu_door_opening_surprising_effectiveness_raw": "openx_rlds.nyu_door_opening_surprising_effectiveness",
- "lerobot-raw/viola_raw": "openx_rlds.viola",
- "lerobot-raw/berkeley_autolab_ur5_raw": "openx_rlds.berkeley_autolab_ur5",
- "lerobot-raw/toto_raw": "openx_rlds.toto",
- "lerobot-raw/language_table_raw": "openx_rlds.language_table",
- "lerobot-raw/columbia_cairlab_pusht_real_raw": "openx_rlds.columbia_cairlab_pusht_real",
- "lerobot-raw/stanford_kuka_multimodal_dataset_raw": "openx_rlds.stanford_kuka_multimodal_dataset",
- "lerobot-raw/nyu_rot_dataset_raw": "openx_rlds.nyu_rot_dataset",
- "lerobot-raw/io_ai_tech_raw": "openx_rlds.io_ai_tech",
- "lerobot-raw/stanford_hydra_dataset_raw": "openx_rlds.stanford_hydra_dataset",
- "lerobot-raw/austin_buds_dataset_raw": "openx_rlds.austin_buds_dataset",
- "lerobot-raw/nyu_franka_play_dataset_raw": "openx_rlds.nyu_franka_play_dataset",
- "lerobot-raw/maniskill_dataset_raw": "openx_rlds.maniskill_dataset",
- "lerobot-raw/furniture_bench_dataset_raw": "openx_rlds.furniture_bench_dataset",
- "lerobot-raw/cmu_franka_exploration_dataset_raw": "openx_rlds.cmu_franka_exploration_dataset",
- "lerobot-raw/ucsd_kitchen_dataset_raw": "openx_rlds.ucsd_kitchen_dataset",
- "lerobot-raw/ucsd_pick_and_place_dataset_raw": "openx_rlds.ucsd_pick_and_place_dataset",
- "lerobot-raw/spoc_raw": "openx_rlds.spoc",
- "lerobot-raw/austin_sailor_dataset_raw": "openx_rlds.austin_sailor_dataset",
- "lerobot-raw/austin_sirius_dataset_raw": "openx_rlds.austin_sirius_dataset",
- "lerobot-raw/bc_z_raw": "openx_rlds.bc_z",
- "lerobot-raw/utokyo_pr2_opening_fridge_raw": "openx_rlds.utokyo_pr2_opening_fridge",
- "lerobot-raw/utokyo_pr2_tabletop_manipulation_raw": "openx_rlds.utokyo_pr2_tabletop_manipulation",
- "lerobot-raw/utokyo_xarm_pick_and_place_raw": "openx_rlds.utokyo_xarm_pick_and_place",
- "lerobot-raw/utokyo_xarm_bimanual_raw": "openx_rlds.utokyo_xarm_bimanual",
- "lerobot-raw/utokyo_saytap_raw": "openx_rlds.utokyo_saytap",
- "lerobot-raw/robo_net_raw": "openx_rlds.robo_net",
- "lerobot-raw/robo_set_raw": "openx_rlds.robo_set",
- "lerobot-raw/berkeley_mvp_raw": "openx_rlds.berkeley_mvp",
- "lerobot-raw/berkeley_rpt_raw": "openx_rlds.berkeley_rpt",
- "lerobot-raw/kaist_nonprehensile_raw": "openx_rlds.kaist_nonprehensile",
- "lerobot-raw/stanford_mask_vit_raw": "openx_rlds.stanford_mask_vit",
- "lerobot-raw/tokyo_u_lsmo_raw": "openx_rlds.tokyo_u_lsmo",
- "lerobot-raw/dlr_sara_pour_raw": "openx_rlds.dlr_sara_pour",
- "lerobot-raw/dlr_sara_grid_clamp_raw": "openx_rlds.dlr_sara_grid_clamp",
- "lerobot-raw/dlr_edan_shared_control_raw": "openx_rlds.dlr_edan_shared_control",
- "lerobot-raw/asu_table_top_raw": "openx_rlds.asu_table_top",
- "lerobot-raw/stanford_robocook_raw": "openx_rlds.stanford_robocook",
- "lerobot-raw/imperialcollege_sawyer_wrist_cam_raw": "openx_rlds.imperialcollege_sawyer_wrist_cam",
- "lerobot-raw/iamlab_cmu_pickup_insert_raw": "openx_rlds.iamlab_cmu_pickup_insert",
- "lerobot-raw/uiuc_d3field_raw": "openx_rlds.uiuc_d3field",
- "lerobot-raw/utaustin_mutex_raw": "openx_rlds.utaustin_mutex",
- "lerobot-raw/berkeley_fanuc_manipulation_raw": "openx_rlds.berkeley_fanuc_manipulation",
- "lerobot-raw/cmu_playing_with_food_raw": "openx_rlds.cmu_playing_with_food",
- "lerobot-raw/cmu_play_fusion_raw": "openx_rlds.cmu_play_fusion",
- "lerobot-raw/cmu_stretch_raw": "openx_rlds.cmu_stretch",
- "lerobot-raw/berkeley_gnm_recon_raw": "openx_rlds.berkeley_gnm_recon",
- "lerobot-raw/berkeley_gnm_cory_hall_raw": "openx_rlds.berkeley_gnm_cory_hall",
- "lerobot-raw/berkeley_gnm_sac_son_raw": "openx_rlds.berkeley_gnm_sac_son",
- "lerobot-raw/droid_raw": "openx_rlds.droid",
- "lerobot-raw/droid_100_raw": "openx_rlds.droid100",
- "lerobot-raw/fmb_raw": "openx_rlds.fmb",
- "lerobot-raw/dobbe_raw": "openx_rlds.dobbe",
- "lerobot-raw/usc_cloth_sim_raw": "openx_rlds.usc_cloth_sim",
- "lerobot-raw/plex_robosuite_raw": "openx_rlds.plex_robosuite",
- "lerobot-raw/conq_hose_manipulation_raw": "openx_rlds.conq_hose_manipulation",
- "lerobot-raw/vima_raw": "openx_rlds.vima",
- "lerobot-raw/robot_vqa_raw": "openx_rlds.robot_vqa",
- "lerobot-raw/mimic_play_raw": "openx_rlds.mimic_play",
- "lerobot-raw/tidybot_raw": "openx_rlds.tidybot",
- "lerobot-raw/eth_agent_affordances_raw": "openx_rlds.eth_agent_affordances",
-}
-
-
-def download_raw(raw_dir: Path, repo_id: str):
- check_repo_id(repo_id)
- user_id, dataset_id = repo_id.split("/")
-
- if not dataset_id.endswith("_raw"):
- warnings.warn(
- f"""`dataset_id` ({dataset_id}) doesn't end with '_raw' (e.g. 'lerobot/pusht_raw'). Following this
- naming convention by renaming your repository is advised, but not mandatory.""",
- stacklevel=1,
- )
-
- # Send warning if raw_dir isn't well formatted
- if raw_dir.parts[-2] != user_id or raw_dir.parts[-1] != dataset_id:
- warnings.warn(
- f"""`raw_dir` ({raw_dir}) doesn't contain a community or user id `/` the name of the dataset that
- match the `repo_id` (e.g. 'data/lerobot/pusht_raw'). Following this naming convention is advised,
- but not mandatory.""",
- stacklevel=1,
- )
- raw_dir.mkdir(parents=True, exist_ok=True)
-
- logging.info(f"Start downloading from huggingface.co/{user_id} for {dataset_id}")
- snapshot_download(repo_id, repo_type="dataset", local_dir=raw_dir)
- logging.info(f"Finish downloading from huggingface.co/{user_id} for {dataset_id}")
-
-
-def download_all_raw_datasets(data_dir: Path | None = None):
- if data_dir is None:
- data_dir = Path("data")
- for repo_id in AVAILABLE_RAW_REPO_IDS:
- raw_dir = data_dir / repo_id
- download_raw(raw_dir, repo_id)
-
-
-def main():
- parser = argparse.ArgumentParser(
- description=f"""A script to download raw datasets from Hugging Face hub to a local directory. Here is a
- non exhaustive list of available repositories to use in `--repo-id`: {list(AVAILABLE_RAW_REPO_IDS.keys())}""",
- )
-
- parser.add_argument(
- "--raw-dir",
- type=Path,
- required=True,
- help="Directory containing input raw datasets (e.g. `data/aloha_mobile_chair_raw` or `data/pusht_raw).",
- )
- parser.add_argument(
- "--repo-id",
- type=str,
- required=True,
- help="""Repositery identifier on Hugging Face: a community or a user name `/` the name of
- the dataset (e.g. `lerobot/pusht_raw`, `cadene/aloha_sim_insertion_human_raw`).""",
- )
- args = parser.parse_args()
- download_raw(**vars(args))
-
-
-if __name__ == "__main__":
- main()
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py b/lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py
deleted file mode 100644
index 184d79fb22..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Use this script to batch encode lerobot datasets from their raw format to LeRobotDataset and push their updated
-version to the hub. Under the hood, this script reuses 'push_dataset_to_hub.py'. It assumes that you already
-downloaded raw datasets, which you can do with the related '_download_raw.py' script.
-
-For instance, for codebase_version = 'v1.6', the following command was run, assuming raw datasets from
-lerobot-raw were downloaded in 'raw/datasets/directory':
-```bash
-python lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py \
- --raw-dir raw/datasets/directory \
- --raw-repo-ids lerobot-raw \
- --local-dir push/datasets/directory \
- --tests-data-dir tests/data \
- --push-repo lerobot \
- --vcodec libsvtav1 \
- --pix-fmt yuv420p \
- --g 2 \
- --crf 30
-```
-"""
-
-import argparse
-from pathlib import Path
-
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-from lerobot.common.datasets.push_dataset_to_hub._download_raw import AVAILABLE_RAW_REPO_IDS
-from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id
-from lerobot.scripts.push_dataset_to_hub import push_dataset_to_hub
-
-
-def get_push_repo_id_from_raw(raw_repo_id: str, push_repo: str) -> str:
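- # e.g. "lerobot-raw/pusht_raw" with push_repo="lerobot" -> "lerobot/pusht"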
- dataset_id_raw = raw_repo_id.split("/")[1]
- dataset_id = dataset_id_raw.removesuffix("_raw")
- return f"{push_repo}/{dataset_id}"
-
-
-def encode_datasets(
- raw_dir: Path,
- raw_repo_ids: list[str],
- push_repo: str,
- vcodec: str,
- pix_fmt: str,
- g: int,
- crf: int,
- local_dir: Path | None = None,
- tests_data_dir: Path | None = None,
- raw_format: str | None = None,
- dry_run: bool = False,
-) -> None:
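- # a single "lerobot-raw" entry expands to every known raw repo id with its registered format;
- # otherwise, a common `raw_format` must be provided for all listed repo ids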
- if len(raw_repo_ids) == 1 and raw_repo_ids[0].lower() == "lerobot-raw":
- raw_repo_ids_format = AVAILABLE_RAW_REPO_IDS
- else:
- if raw_format is None:
- raise ValueError("raw_format must be specified when raw_repo_ids is not 'lerobot-raw'")
- raw_repo_ids_format = {id_: raw_format for id_ in raw_repo_ids}
-
- for raw_repo_id, repo_raw_format in raw_repo_ids_format.items():
- check_repo_id(raw_repo_id)
- dataset_repo_id_push = get_push_repo_id_from_raw(raw_repo_id, push_repo)
- dataset_raw_dir = raw_dir / raw_repo_id
- dataset_dir = local_dir / dataset_repo_id_push if local_dir is not None else None
- encoding = {
- "vcodec": vcodec,
- "pix_fmt": pix_fmt,
- "g": g,
- "crf": crf,
- }
-
- if not (dataset_raw_dir).is_dir():
- raise NotADirectoryError(dataset_raw_dir)
-
- if not dry_run:
- push_dataset_to_hub(
- dataset_raw_dir,
- raw_format=repo_raw_format,
- repo_id=dataset_repo_id_push,
- local_dir=dataset_dir,
- resume=True,
- encoding=encoding,
- tests_data_dir=tests_data_dir,
- )
- else:
- print(
- f"DRY RUN: {dataset_raw_dir} --> {dataset_dir} --> {dataset_repo_id_push}@{CODEBASE_VERSION}"
- )
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--raw-dir",
- type=Path,
- default=Path("data"),
- help="Directory where raw datasets are located.",
- )
- parser.add_argument(
- "--raw-repo-ids",
- type=str,
- nargs="*",
- default=["lerobot-raw"],
- help="""Raw dataset repo ids. if 'lerobot-raw', the keys from `AVAILABLE_RAW_REPO_IDS` will be
- used and raw datasets will be fetched from the 'lerobot-raw/' repo and pushed with their
- associated format. It is assumed that each dataset is located at `raw_dir / raw_repo_id` """,
- )
- parser.add_argument(
- "--raw-format",
- type=str,
- default=None,
- help="""Raw format to use for the raw repo-ids. Must be specified if --raw-repo-ids is not
- 'lerobot-raw'""",
- )
- parser.add_argument(
- "--local-dir",
- type=Path,
- default=None,
- help="""When provided, writes the dataset converted to LeRobotDataset format in this directory
- (e.g. `data/lerobot/aloha_mobile_chair`).""",
- )
- parser.add_argument(
- "--push-repo",
- type=str,
- default="lerobot",
- help="Repo to upload datasets to",
- )
- parser.add_argument(
- "--vcodec",
- type=str,
- default="libsvtav1",
- help="Codec to use for encoding videos",
- )
- parser.add_argument(
- "--pix-fmt",
- type=str,
- default="yuv420p",
- help="Pixel formats (chroma subsampling) to be used for encoding",
- )
- parser.add_argument(
- "--g",
- type=int,
- default=2,
- help="Group of pictures sizes to be used for encoding.",
- )
- parser.add_argument(
- "--crf",
- type=int,
- default=30,
- help="Constant rate factors to be used for encoding.",
- )
- parser.add_argument(
- "--tests-data-dir",
- type=Path,
- default=None,
- help=(
- "When provided, save tests artifacts into the given directory "
- "(e.g. `--tests-data-dir tests/data` will save to tests/data/{--repo-id})."
- ),
- )
- parser.add_argument(
- "--dry-run",
- type=int,
- default=0,
- help="If not set to 0, this script won't download or upload anything.",
- )
- args = parser.parse_args()
- encode_datasets(**vars(args))
-
-
-if __name__ == "__main__":
- main()
diff --git a/lerobot/common/datasets/push_dataset_to_hub/_umi_imagecodecs_numcodecs.py b/lerobot/common/datasets/push_dataset_to_hub/_umi_imagecodecs_numcodecs.py
deleted file mode 100644
index a118b7e789..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/_umi_imagecodecs_numcodecs.py
+++ /dev/null
@@ -1,326 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# imagecodecs/numcodecs.py
-
-# Copyright (c) 2021-2022, Christoph Gohlke
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# 3. Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-# Copied from: https://github.com/real-stanford/universal_manipulation_interface/blob/298776ce251f33b6b3185a98d6e7d1f9ad49168b/diffusion_policy/codecs/imagecodecs_numcodecs.py#L1
-"""Additional numcodecs implemented using imagecodecs."""
-
-__version__ = "2022.9.26"
-
-__all__ = ("register_codecs",)
-
-import imagecodecs
-import numpy
-from numcodecs.abc import Codec
-from numcodecs.registry import get_codec, register_codec
-
-# TODO (azouitine): Remove useless codecs
-
-
-def protective_squeeze(x: numpy.ndarray):
- """
- Squeeze leading dims only, never the image dims.
- Image shape is expected to be (*, H, W, C).
- """
- img_shape = x.shape[-3:]
- if len(x.shape) > 3:
- n_imgs = numpy.prod(x.shape[:-3])
- if n_imgs > 1:
- img_shape = (-1,) + img_shape
- return x.reshape(img_shape)
-
-
-def get_default_image_compressor(**kwargs):
- if imagecodecs.JPEGXL:
- # has JPEGXL
- this_kwargs = {
- "effort": 3,
- "distance": 0.3,
- # bug in libjxl, invalid codestream for non-lossless
- # when decoding speed > 1
- "decodingspeed": 1,
- }
- this_kwargs.update(kwargs)
- return JpegXl(**this_kwargs)
- else:
- this_kwargs = {"level": 50}
- this_kwargs.update(kwargs)
- return Jpeg2k(**this_kwargs)
-
-
-class Jpeg2k(Codec):
- """JPEG 2000 codec for numcodecs."""
-
- codec_id = "imagecodecs_jpeg2k"
-
- def __init__(
- self,
- level=None,
- codecformat=None,
- colorspace=None,
- tile=None,
- reversible=None,
- bitspersample=None,
- resolutions=None,
- numthreads=None,
- verbose=0,
- ):
- self.level = level
- self.codecformat = codecformat
- self.colorspace = colorspace
- self.tile = None if tile is None else tuple(tile)
- self.reversible = reversible
- self.bitspersample = bitspersample
- self.resolutions = resolutions
- self.numthreads = numthreads
- self.verbose = verbose
-
- def encode(self, buf):
- buf = protective_squeeze(numpy.asarray(buf))
- return imagecodecs.jpeg2k_encode(
- buf,
- level=self.level,
- codecformat=self.codecformat,
- colorspace=self.colorspace,
- tile=self.tile,
- reversible=self.reversible,
- bitspersample=self.bitspersample,
- resolutions=self.resolutions,
- numthreads=self.numthreads,
- verbose=self.verbose,
- )
-
- def decode(self, buf, out=None):
- return imagecodecs.jpeg2k_decode(buf, verbose=self.verbose, numthreads=self.numthreads, out=out)
-
-
-class JpegXl(Codec):
- """JPEG XL codec for numcodecs."""
-
- codec_id = "imagecodecs_jpegxl"
-
- def __init__(
- self,
- # encode
- level=None,
- effort=None,
- distance=None,
- lossless=None,
- decodingspeed=None,
- photometric=None,
- planar=None,
- usecontainer=None,
- # decode
- index=None,
- keeporientation=None,
- # both
- numthreads=None,
- ):
- """
- Return JPEG XL image from numpy array.
- Float must be in nominal range 0..1.
-
- Currently L, LA, RGB, RGBA images are supported in contig mode.
- Extra channels are only supported for grayscale images in planar mode.
-
- Parameters
- ----------
- level : Default to None, i.e. not overwriting lossless and decodingspeed options.
- When < 0: Use lossless compression
- When in [0,1,2,3,4]: Sets the decoding speed tier for the provided options.
- Minimum is 0 (slowest to decode, best quality/density), and maximum
- is 4 (fastest to decode, at the cost of some quality/density).
- effort : Default to 3.
- Sets encoder effort/speed level without affecting decoding speed.
- Valid values are, from faster to slower speed: 1:lightning 2:thunder
- 3:falcon 4:cheetah 5:hare 6:wombat 7:squirrel 8:kitten 9:tortoise.
- Speed: lightning, thunder, falcon, cheetah, hare, wombat, squirrel, kitten, tortoise
- control the encoder effort in ascending order.
- This also affects memory usage: using lower effort will typically reduce memory
- consumption during encoding.
- lightning and thunder are fast modes useful for lossless mode (modular).
- falcon disables all of the following tools.
- cheetah enables coefficient reordering, context clustering, and heuristics for selecting DCT sizes and quantization steps.
- hare enables Gaborish filtering, chroma from luma, and an initial estimate of quantization steps.
- wombat enables error diffusion quantization and full DCT size selection heuristics.
- squirrel (default) enables dots, patches, and spline detection, and full context clustering.
- kitten optimizes the adaptive quantization for a psychovisual metric.
- tortoise enables a more thorough adaptive quantization search.
- distance : Default to 1.0
- Sets the distance level for lossy compression: target max butteraugli distance,
- lower = higher quality. Range: 0 .. 15. 0.0 = mathematically lossless
- (however, use JxlEncoderSetFrameLossless instead to use true lossless,
- as setting distance to 0 alone is not the only requirement).
- 1.0 = visually lossless. Recommended range: 0.5 .. 3.0.
- lossless : Default to False.
- Use lossless encoding.
- decodingspeed : Default to 0.
- Duplicate to level. [0,4]
- photometric : Return JxlColorSpace value.
- Default logic is quite complicated but works most of the time.
- Accepted value:
- int: [-1,3]
- str: ['RGB',
- 'WHITEISZERO', 'MINISWHITE',
- 'BLACKISZERO', 'MINISBLACK', 'GRAY',
- 'XYB', 'KNOWN']
- planar : Enable multi-channel mode.
- Default to false.
- usecontainer :
- Forces the encoder to use the box-based container format (BMFF)
- even when not necessary.
- When using JxlEncoderUseBoxes, JxlEncoderStoreJPEGMetadata or
- JxlEncoderSetCodestreamLevel with level 10, the encoder will
- automatically also use the container format, it is not necessary
- to use JxlEncoderUseContainer for those use cases.
- By default this setting is disabled.
- index : Selectively decode frames for animation.
- Default to 0, decode all frames.
- When set to > 0, decode that frame index only.
- keeporientation :
- Enables or disables preserving of as-in-bitstream pixeldata orientation.
- Some images are encoded with an Orientation tag indicating that the
- decoder must perform a rotation and/or mirroring to the encoded image data.
-
- If skip_reorientation is JXL_FALSE (the default): the decoder will apply
- the transformation from the orientation setting, hence rendering the image
- according to its specified intent. When producing a JxlBasicInfo, the decoder
- will always set the orientation field to JXL_ORIENT_IDENTITY (matching the
- returned pixel data) and also align xsize and ysize so that they correspond
- to the width and the height of the returned pixel data.
-
- If skip_reorientation is JXL_TRUE: the decoder will skip applying the
- transformation from the orientation setting, returning the image in
- the as-in-bitstream pixeldata orientation. This may be faster to decode
- since the decoder doesn't have to apply the transformation, but can
- cause wrong display of the image if the orientation tag is not correctly
- taken into account by the user.
-
- By default, this option is disabled, and the returned pixel data is
- re-oriented according to the image's Orientation setting.
- threads : Default to 1.
- If <= 0, use all cores.
- If > 32, clipped to 32.
- """
-
- self.level = level
- self.effort = effort
- self.distance = distance
- self.lossless = bool(lossless)
- self.decodingspeed = decodingspeed
- self.photometric = photometric
- self.planar = planar
- self.usecontainer = usecontainer
- self.index = index
- self.keeporientation = keeporientation
- self.numthreads = numthreads
-
- def encode(self, buf):
- # TODO: only squeeze all but last dim
- buf = protective_squeeze(numpy.asarray(buf))
- return imagecodecs.jpegxl_encode(
- buf,
- level=self.level,
- effort=self.effort,
- distance=self.distance,
- lossless=self.lossless,
- decodingspeed=self.decodingspeed,
- photometric=self.photometric,
- planar=self.planar,
- usecontainer=self.usecontainer,
- numthreads=self.numthreads,
- )
-
- def decode(self, buf, out=None):
- return imagecodecs.jpegxl_decode(
- buf,
- index=self.index,
- keeporientation=self.keeporientation,
- numthreads=self.numthreads,
- out=out,
- )
-
-
-def _flat(out):
- """Return numpy array as contiguous view of bytes if possible."""
- if out is None:
- return None
- view = memoryview(out)
- if view.readonly or not view.contiguous:
- return None
- return view.cast("B")
-
-
-def register_codecs(codecs=None, force=False, verbose=True):
- """Register codecs in this module with numcodecs."""
- for name, cls in globals().items():
- if not hasattr(cls, "codec_id") or name == "Codec":
- continue
- if codecs is not None and cls.codec_id not in codecs:
- continue
- try:
- try: # noqa: SIM105
- get_codec({"id": cls.codec_id})
- except TypeError:
- # registered, but failed
- pass
- except ValueError:
- # not registered yet
- pass
- else:
- if not force:
- if verbose:
- log_warning(f"numcodec {cls.codec_id!r} already registered")
- continue
- if verbose:
- log_warning(f"replacing registered numcodec {cls.codec_id!r}")
- register_codec(cls)
-
-
-def log_warning(msg, *args, **kwargs):
- """Log message with level WARNING."""
- import logging
-
- logging.getLogger(__name__).warning(msg, *args, **kwargs)
diff --git a/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py b/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py
deleted file mode 100644
index e2973ef818..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Contains utilities to process raw data format of HDF5 files like in: https://github.com/tonyzhaozh/act
-"""
-
-import gc
-import shutil
-from pathlib import Path
-
-import h5py
-import numpy as np
-import torch
-import tqdm
-from datasets import Dataset, Features, Image, Sequence, Value
-from PIL import Image as PILImage
-
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-from lerobot.common.datasets.push_dataset_to_hub.utils import (
- calculate_episode_data_index,
- concatenate_episodes,
- get_default_encoding,
- save_images_concurrently,
-)
-from lerobot.common.datasets.utils import (
- hf_transform_to_torch,
-)
-from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
-
-
-def get_cameras(hdf5_data):
- # ignore depth channel, not currently handled
- # TODO(rcadene): add depth
- rgb_cameras = [key for key in hdf5_data["/observations/images"].keys() if "depth" not in key] # noqa: SIM118
- return rgb_cameras
-
-
-def check_format(raw_dir) -> bool:
- # only frames from simulation are uncompressed
- compressed_images = "sim" not in raw_dir.name
-
- hdf5_paths = list(raw_dir.glob("episode_*.hdf5"))
- assert len(hdf5_paths) != 0
- for hdf5_path in hdf5_paths:
- with h5py.File(hdf5_path, "r") as data:
- assert "/action" in data
- assert "/observations/qpos" in data
-
- assert data["/action"].ndim == 2
- assert data["/observations/qpos"].ndim == 2
-
- num_frames = data["/action"].shape[0]
- assert num_frames == data["/observations/qpos"].shape[0]
-
- for camera in get_cameras(data):
- assert num_frames == data[f"/observations/images/{camera}"].shape[0]
-
- if compressed_images:
- assert data[f"/observations/images/{camera}"].ndim == 2
- else:
- assert data[f"/observations/images/{camera}"].ndim == 4
- b, h, w, c = data[f"/observations/images/{camera}"].shape
- assert c < h and c < w, f"Expect (h,w,c) image format but ({h=},{w=},{c=}) provided."
-
-
-def load_from_raw(
- raw_dir: Path,
- videos_dir: Path,
- fps: int,
- video: bool,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- # only frames from simulation are uncompressed
- compressed_images = "sim" not in raw_dir.name
-
- hdf5_files = sorted(raw_dir.glob("episode_*.hdf5"))
- num_episodes = len(hdf5_files)
-
- ep_dicts = []
- ep_ids = episodes if episodes else range(num_episodes)
- for ep_idx in tqdm.tqdm(ep_ids):
- ep_path = hdf5_files[ep_idx]
- with h5py.File(ep_path, "r") as ep:
- num_frames = ep["/action"].shape[0]
-
- # last step of demonstration is considered done
- done = torch.zeros(num_frames, dtype=torch.bool)
- done[-1] = True
-
- state = torch.from_numpy(ep["/observations/qpos"][:])
- action = torch.from_numpy(ep["/action"][:])
- if "/observations/qvel" in ep:
- velocity = torch.from_numpy(ep["/observations/qvel"][:])
- if "/observations/effort" in ep:
- effort = torch.from_numpy(ep["/observations/effort"][:])
-
- ep_dict = {}
-
- for camera in get_cameras(ep):
- img_key = f"observation.images.{camera}"
-
- if compressed_images:
- import cv2
-
- # load one compressed image after the other in RAM and uncompress
- imgs_array = []
- for data in ep[f"/observations/images/{camera}"]:
- imgs_array.append(cv2.imdecode(data, 1))
- imgs_array = np.array(imgs_array)
-
- else:
- # load all images in RAM
- imgs_array = ep[f"/observations/images/{camera}"][:]
-
- if video:
- # save png images in temporary directory
- tmp_imgs_dir = videos_dir / "tmp_images"
- save_images_concurrently(imgs_array, tmp_imgs_dir)
-
- # encode images to a mp4 video
- fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
- video_path = videos_dir / fname
- encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
-
- # clean temporary images directory
- shutil.rmtree(tmp_imgs_dir)
-
- # store the reference to the video frame
- ep_dict[img_key] = [
- {"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
- ]
- else:
- ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
-
- ep_dict["observation.state"] = state
- if "/observations/velocity" in ep:
- ep_dict["observation.velocity"] = velocity
- if "/observations/effort" in ep:
- ep_dict["observation.effort"] = effort
- ep_dict["action"] = action
- ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
- ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
- ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
- ep_dict["next.done"] = done
- # TODO(rcadene): add reward and success by computing them in sim
-
- assert isinstance(ep_idx, int)
- ep_dicts.append(ep_dict)
-
- gc.collect()
-
- data_dict = concatenate_episodes(ep_dicts)
-
- total_frames = data_dict["frame_index"].shape[0]
- data_dict["index"] = torch.arange(0, total_frames, 1)
- return data_dict
-
-
-def to_hf_dataset(data_dict, video) -> Dataset:
- features = {}
-
- keys = [key for key in data_dict if "observation.images." in key]
- for key in keys:
- if video:
- features[key] = VideoFrame()
- else:
- features[key] = Image()
-
- features["observation.state"] = Sequence(
- length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
- )
- if "observation.velocity" in data_dict:
- features["observation.velocity"] = Sequence(
- length=data_dict["observation.velocity"].shape[1], feature=Value(dtype="float32", id=None)
- )
- if "observation.effort" in data_dict:
- features["observation.effort"] = Sequence(
- length=data_dict["observation.effort"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["action"] = Sequence(
- length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["episode_index"] = Value(dtype="int64", id=None)
- features["frame_index"] = Value(dtype="int64", id=None)
- features["timestamp"] = Value(dtype="float32", id=None)
- features["next.done"] = Value(dtype="bool", id=None)
- features["index"] = Value(dtype="int64", id=None)
-
- hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
- hf_dataset.set_transform(hf_transform_to_torch)
- return hf_dataset
-
-
-def from_raw_to_lerobot_format(
- raw_dir: Path,
- videos_dir: Path,
- fps: int | None = None,
- video: bool = True,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- # sanity check
- check_format(raw_dir)
-
- if fps is None:
- fps = 50
-
- data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
- hf_dataset = to_hf_dataset(data_dict, video)
- episode_data_index = calculate_episode_data_index(hf_dataset)
- info = {
- "codebase_version": CODEBASE_VERSION,
- "fps": fps,
- "video": video,
- }
- if video:
- info["encoding"] = get_default_encoding()
-
- return hf_dataset, episode_data_index, info
diff --git a/lerobot/common/datasets/push_dataset_to_hub/cam_png_format.py b/lerobot/common/datasets/push_dataset_to_hub/cam_png_format.py
deleted file mode 100644
index 264925766f..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/cam_png_format.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Contains utilities to process the raw data format of png image files recorded with capture_camera_feed.py
-"""
-
-from pathlib import Path
-
-import torch
-from datasets import Dataset, Features, Image, Value
-from PIL import Image as PILImage
-
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-from lerobot.common.datasets.push_dataset_to_hub.utils import (
- calculate_episode_data_index,
- concatenate_episodes,
-)
-from lerobot.common.datasets.utils import hf_transform_to_torch
-from lerobot.common.datasets.video_utils import VideoFrame
-
-
-def check_format(raw_dir: Path) -> bool:
- image_paths = list(raw_dir.glob("frame_*.png"))
- if len(image_paths) == 0:
- raise ValueError(f"No 'frame_*.png' images found in {raw_dir}")
-
-
-def load_from_raw(raw_dir: Path, fps: int, episodes: list[int] | None = None):
- if episodes is not None:
- # TODO(aliberts): add support for multi-episodes.
- raise NotImplementedError()
-
- ep_dict = {}
- ep_idx = 0
-
- image_paths = sorted(raw_dir.glob("frame_*.png"))
- num_frames = len(image_paths)
-
- ep_dict["observation.image"] = [PILImage.open(x) for x in image_paths]
- ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
- ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
- ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
-
- ep_dicts = [ep_dict]
- data_dict = concatenate_episodes(ep_dicts)
- total_frames = data_dict["frame_index"].shape[0]
- data_dict["index"] = torch.arange(0, total_frames, 1)
- return data_dict
-
-
-def to_hf_dataset(data_dict, video) -> Dataset:
- features = {}
- if video:
- features["observation.image"] = VideoFrame()
- else:
- features["observation.image"] = Image()
-
- features["episode_index"] = Value(dtype="int64", id=None)
- features["frame_index"] = Value(dtype="int64", id=None)
- features["timestamp"] = Value(dtype="float32", id=None)
- features["index"] = Value(dtype="int64", id=None)
-
- hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
- hf_dataset.set_transform(hf_transform_to_torch)
- return hf_dataset
-
-
-def from_raw_to_lerobot_format(
- raw_dir: Path,
- videos_dir: Path,
- fps: int | None = None,
- video: bool = True,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- if video or episodes or encoding is not None:
- # TODO(aliberts): support this
- raise NotImplementedError
-
- # sanity check
- check_format(raw_dir)
-
- if fps is None:
- fps = 30
-
- data_dict = load_from_raw(raw_dir, fps, episodes)
- hf_dataset = to_hf_dataset(data_dict, video)
- episode_data_index = calculate_episode_data_index(hf_dataset)
- info = {
- "codebase_version": CODEBASE_VERSION,
- "fps": fps,
- "video": video,
- }
- return hf_dataset, episode_data_index, info
diff --git a/lerobot/common/datasets/push_dataset_to_hub/dora_parquet_format.py b/lerobot/common/datasets/push_dataset_to_hub/dora_parquet_format.py
deleted file mode 100644
index 95f9c00712..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/dora_parquet_format.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Contains utilities to process raw data format from dora-record
-"""
-
-import re
-import warnings
-from pathlib import Path
-
-import pandas as pd
-import torch
-from datasets import Dataset, Features, Image, Sequence, Value
-
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-from lerobot.common.datasets.push_dataset_to_hub.utils import calculate_episode_data_index
-from lerobot.common.datasets.utils import (
- hf_transform_to_torch,
-)
-from lerobot.common.datasets.video_utils import VideoFrame
-
-
-def check_format(raw_dir) -> bool:
- assert raw_dir.exists()
-
- leader_file = list(raw_dir.glob("*.parquet"))
- if len(leader_file) == 0:
- raise ValueError(f"Missing parquet files in '{raw_dir}'")
- return True
-
-
-def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None = None):
- # Load data stream that will be used as reference for the timestamps synchronization
- reference_files = list(raw_dir.glob("observation.images.cam_*.parquet"))
- if len(reference_files) == 0:
- raise ValueError(f"Missing reference files for camera, starting with in '{raw_dir}'")
- # select first camera in alphanumeric order
- reference_key = sorted(reference_files)[0].stem
- reference_df = pd.read_parquet(raw_dir / f"{reference_key}.parquet")
- reference_df = reference_df[["timestamp_utc", reference_key]]
-
- # Merge all data streams using a nearest-timestamp strategy
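- # streams are aligned within at most one frame period (1/fps seconds) of the reference camera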
- df = reference_df
- for path in raw_dir.glob("*.parquet"):
- key = path.stem # action or observation.state or ...
- if key == reference_key:
- continue
- if "failed_episode_index" in key:
- # TODO(rcadene): add support for removing episodes that are tagged as "failed"
- continue
- modality_df = pd.read_parquet(path)
- modality_df = modality_df[["timestamp_utc", key]]
- df = pd.merge_asof(
- df,
- modality_df,
- on="timestamp_utc",
- # "nearest" is the best option over "backward", since the latter can desynchronizes camera timestamps by
- # matching timestamps that are too far appart, in order to fit the backward constraints. It's not the case for "nearest".
- # However, note that "nearest" might synchronize the reference camera with other cameras on slightly future timestamps.
- # are too far appart.
- direction="nearest",
- tolerance=pd.Timedelta(f"{1/fps} seconds"),
- )
- # Remove rows with episode_index -1, which indicates data recorded in between episodes
- df = df[df["episode_index"] != -1]
-
- image_keys = [key for key in df if "observation.images." in key]
-
- def get_episode_index(row):
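- # the episode index is parsed from each camera's video path, e.g. "..._000042.mp4" -> 42;
- # all cameras in a row must agree on the same episode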
- episode_index_per_cam = {}
- for key in image_keys:
- path = row[key][0]["path"]
- match = re.search(r"_(\d{6}).mp4", path)
- if not match:
- raise ValueError(path)
- episode_index = int(match.group(1))
- episode_index_per_cam[key] = episode_index
- if len(set(episode_index_per_cam.values())) != 1:
- raise ValueError(
- f"All cameras are expected to belong to the same episode, but getting {episode_index_per_cam}"
- )
- return episode_index
-
- df["episode_index"] = df.apply(get_episode_index, axis=1)
-
- # dora only uses arrays, so single values are encapsulated in a list
- df["frame_index"] = df.groupby("episode_index").cumcount()
- df = df.reset_index()
- df["index"] = df.index
-
- # set 'next.done' to True for the last frame of each episode
- df["next.done"] = False
- df.loc[df.groupby("episode_index").tail(1).index, "next.done"] = True
-
- df["timestamp"] = df["timestamp_utc"].map(lambda x: x.timestamp())
- # each episode starts with timestamp 0 to match the ones from the video
- df["timestamp"] = df.groupby("episode_index")["timestamp"].transform(lambda x: x - x.iloc[0])
-
- del df["timestamp_utc"]
-
- # sanity check
- has_nan = df.isna().any().any()
- if has_nan:
- raise ValueError("Dataset contains Nan values.")
-
- # sanity check episode indices go from 0 to n-1
- ep_ids = [ep_idx for ep_idx, _ in df.groupby("episode_index")]
- expected_ep_ids = list(range(df["episode_index"].max() + 1))
- if ep_ids != expected_ep_ids:
- raise ValueError(f"Episodes indices go from {ep_ids} instead of {expected_ep_ids}")
-
- # Create symlink to the raw videos directory (the target needs to be absolute, not relative)
- videos_dir.parent.mkdir(parents=True, exist_ok=True)
- videos_dir.symlink_to((raw_dir / "videos").absolute())
-
- # sanity check the video paths are well formatted
- for key in df:
- if "observation.images." not in key:
- continue
- for ep_idx in ep_ids:
- video_path = videos_dir / f"{key}_episode_{ep_idx:06d}.mp4"
- if not video_path.exists():
- raise ValueError(f"Video file not found in {video_path}")
-
- data_dict = {}
- for key in df:
- # is video frame
- if "observation.images." in key:
- # we need `[0]` because dora only uses arrays, so single values are encapsulated in a list.
- # this is the case for the video_frame dictionary = [{"path": ..., "timestamp": ...}]
- data_dict[key] = [video_frame[0] for video_frame in df[key].values]
-
- # sanity check the video path is well formatted
- video_path = videos_dir.parent / data_dict[key][0]["path"]
- if not video_path.exists():
- raise ValueError(f"Video file not found in {video_path}")
- # is number
- elif df[key].iloc[0].ndim == 0 or df[key].iloc[0].shape[0] == 1:
- data_dict[key] = torch.from_numpy(df[key].values)
- # is vector
- elif df[key].iloc[0].shape[0] > 1:
- data_dict[key] = torch.stack([torch.from_numpy(x.copy()) for x in df[key].values])
- else:
- raise ValueError(key)
-
- return data_dict
-
-
-def to_hf_dataset(data_dict, video) -> Dataset:
- features = {}
-
- keys = [key for key in data_dict if "observation.images." in key]
- for key in keys:
- if video:
- features[key] = VideoFrame()
- else:
- features[key] = Image()
-
- features["observation.state"] = Sequence(
- length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
- )
- if "observation.velocity" in data_dict:
- features["observation.velocity"] = Sequence(
- length=data_dict["observation.velocity"].shape[1], feature=Value(dtype="float32", id=None)
- )
- if "observation.effort" in data_dict:
- features["observation.effort"] = Sequence(
- length=data_dict["observation.effort"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["action"] = Sequence(
- length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["episode_index"] = Value(dtype="int64", id=None)
- features["frame_index"] = Value(dtype="int64", id=None)
- features["timestamp"] = Value(dtype="float32", id=None)
- features["next.done"] = Value(dtype="bool", id=None)
- features["index"] = Value(dtype="int64", id=None)
-
- hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
- hf_dataset.set_transform(hf_transform_to_torch)
- return hf_dataset
-
-
-def from_raw_to_lerobot_format(
- raw_dir: Path,
- videos_dir: Path,
- fps: int | None = None,
- video: bool = True,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- # sanity check
- check_format(raw_dir)
-
- if fps is None:
- fps = 30
- else:
- raise NotImplementedError()
-
- if not video:
- raise NotImplementedError()
-
- if encoding is not None:
- warnings.warn(
- "Video encoding is currently done outside of LeRobot for the dora_parquet format.",
- stacklevel=1,
- )
-
- data_df = load_from_raw(raw_dir, videos_dir, fps, video, episodes)
- hf_dataset = to_hf_dataset(data_df, video)
- episode_data_index = calculate_episode_data_index(hf_dataset)
- info = {
- "codebase_version": CODEBASE_VERSION,
- "fps": fps,
- "video": video,
- }
- if video:
- info["encoding"] = "unknown"
-
- return hf_dataset, episode_data_index, info
diff --git a/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py b/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py
deleted file mode 100644
index 1f8a5d1441..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-For all datasets in the RLDS format.
-For https://github.com/google-deepmind/open_x_embodiment (OPENX) datasets.
-
-NOTE: You need to install tensorflow and tensorflow_datasets before running this script.
-
-Example:
- python lerobot/scripts/push_dataset_to_hub.py \
- --raw-dir /path/to/data/bridge_dataset/1.0.0/ \
- --repo-id your_hub/sampled_bridge_data_v2 \
- --raw-format rlds \
- --episodes 3 4 5 8 9
-
-Exact dataset fps defined in openx/config.py, obtained from:
- https://docs.google.com/spreadsheets/d/1rPBD77tk60AEIGZrGSODwyyzs5FgCU9Uz3h-3_t2A9g/edit?gid=0#gid=0&range=R:R
-"""
-
-import shutil
-from pathlib import Path
-
-import numpy as np
-import tensorflow as tf
-import tensorflow_datasets as tfds
-import torch
-import tqdm
-from datasets import Dataset, Features, Image, Sequence, Value
-from PIL import Image as PILImage
-
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-from lerobot.common.datasets.push_dataset_to_hub.utils import (
- calculate_episode_data_index,
- concatenate_episodes,
- get_default_encoding,
- save_images_concurrently,
-)
-from lerobot.common.datasets.utils import (
- hf_transform_to_torch,
-)
-from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
-
-np.set_printoptions(precision=2)
-
-
-def tf_to_torch(data):
- return torch.from_numpy(data.numpy())
-
-
-def tf_img_convert(img):
- if img.dtype == tf.string:
- img = tf.io.decode_image(img, expand_animations=False, dtype=tf.uint8)
- elif img.dtype != tf.uint8:
- raise ValueError(f"Unsupported image dtype: found with dtype {img.dtype}")
- return img.numpy()
-
-
-def _broadcast_metadata_rlds(i: tf.Tensor, traj: dict) -> dict:
- """
- In the RLDS format, each trajectory has some top-level metadata that is explicitly separated out, and a "steps"
- entry. This function moves the "steps" entry to the top level, broadcasting any metadata to the length of the
- trajectory. This function also adds the extra metadata fields `_len`, `_traj_index`, and `_frame_index`.
-
- NOTE: adapted from DLimp library https://github.com/kvablack/dlimp/
- """
- steps = traj.pop("steps")
-
- traj_len = tf.shape(tf.nest.flatten(steps)[0])[0]
-
- # broadcast metadata to the length of the trajectory
- metadata = tf.nest.map_structure(lambda x: tf.repeat(x, traj_len), traj)
-
- # put steps back in
- assert "traj_metadata" not in steps
- traj = {**steps, "traj_metadata": metadata}
-
- assert "_len" not in traj
- assert "_traj_index" not in traj
- assert "_frame_index" not in traj
- traj["_len"] = tf.repeat(traj_len, traj_len)
- traj["_traj_index"] = tf.repeat(i, traj_len)
- traj["_frame_index"] = tf.range(traj_len)
-
- return traj
-
-
-def load_from_raw(
- raw_dir: Path,
- videos_dir: Path,
- fps: int,
- video: bool,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- """
- Args:
- raw_dir (Path): directory containing the raw RLDS/tfds dataset.
- videos_dir (Path): directory where encoded mp4 videos are written.
- fps (int): frame rate used for timestamps and video encoding.
- video (bool): if True, encode image frames into mp4 videos instead of storing individual images.
- episodes (list[int] | None, optional): subset of episode indices to convert. Defaults to None (all episodes).
- """
- ds_builder = tfds.builder_from_directory(str(raw_dir))
- dataset = ds_builder.as_dataset(
- split="all",
- decoders={"steps": tfds.decode.SkipDecoding()},
- )
-
- dataset_info = ds_builder.info
- print("dataset_info: ", dataset_info)
-
- ds_length = len(dataset)
- dataset = dataset.take(ds_length)
- # "flatten" the dataset as such we can apply trajectory level map() easily
- # each [obs][key] has a shape of (frame_size, ...)
- dataset = dataset.enumerate().map(_broadcast_metadata_rlds)
-
- # we would apply a standardization transform if a dataset_name were provided;
- # since the goal is to convert any rlds-formatted dataset, we instead
- # search for 'image' keys in the observations
- image_keys = []
- state_keys = []
- observation_info = dataset_info.features["steps"]["observation"]
- for key in observation_info:
- # check whether the key is for an image or a vector observation
- if len(observation_info[key].shape) == 3:
- # only adding uint8 images discards depth images
- if observation_info[key].dtype == tf.uint8:
- image_keys.append(key)
- else:
- state_keys.append(key)
-
- lang_key = "language_instruction" if "language_instruction" in dataset.element_spec else None
-
- print(" - image_keys: ", image_keys)
- print(" - lang_key: ", lang_key)
-
- it = iter(dataset)
-
- ep_dicts = []
- # Init temp path to save ep_dicts in case of crash
- tmp_ep_dicts_dir = videos_dir.parent.joinpath("ep_dicts")
- tmp_ep_dicts_dir.mkdir(parents=True, exist_ok=True)
-
- # check if ep_dicts from a previous run have already been saved in the temp directory
- starting_ep_idx = 0
- saved_ep_dicts = [ep.__str__() for ep in tmp_ep_dicts_dir.iterdir()]
- if len(saved_ep_dicts) > 0:
- saved_ep_dicts.sort()
- # get last ep_idx number
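- # (saved files are named 'ep_dict_<10-digit index>.pt', so chars [-13:-3] hold the zero-padded index)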
- starting_ep_idx = int(saved_ep_dicts[-1][-13:-3]) + 1
- for i in range(starting_ep_idx):
- episode = next(it)
- ep_dicts.append(torch.load(saved_ep_dicts[i]))
-
- # if the user specified episodes, skip the ones not in the list
- if episodes is not None:
- if ds_length == 0:
- raise ValueError("No episodes found.")
- # convert episodes index to sorted list
- episodes = sorted(episodes)
-
- for ep_idx in tqdm.tqdm(range(starting_ep_idx, ds_length)):
- episode = next(it)
-
- # if user specified episodes, skip the ones not in the list
- if episodes is not None:
- if len(episodes) == 0:
- break
- if ep_idx == episodes[0]:
- # process this episode
- print(" selecting episode idx: ", ep_idx)
- episodes.pop(0)
- else:
- continue # skip
-
- num_frames = episode["action"].shape[0]
-
- ep_dict = {}
- for key in state_keys:
- ep_dict[f"observation.{key}"] = tf_to_torch(episode["observation"][key])
-
- ep_dict["action"] = tf_to_torch(episode["action"])
- ep_dict["next.reward"] = tf_to_torch(episode["reward"]).float()
- ep_dict["next.done"] = tf_to_torch(episode["is_last"])
- ep_dict["is_terminal"] = tf_to_torch(episode["is_terminal"])
- ep_dict["is_first"] = tf_to_torch(episode["is_first"])
- ep_dict["discount"] = tf_to_torch(episode["discount"])
-
- # If lang_key is present, convert the entire tensor at once
- if lang_key is not None:
- ep_dict["language_instruction"] = [x.numpy().decode("utf-8") for x in episode[lang_key]]
-
- ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
- ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
- ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
-
- image_array_dict = {key: [] for key in image_keys}
-
- for im_key in image_keys:
- imgs = episode["observation"][im_key]
- image_array_dict[im_key] = [tf_img_convert(img) for img in imgs]
-
- # loop through all cameras
- for im_key in image_keys:
- img_key = f"observation.images.{im_key}"
- imgs_array = image_array_dict[im_key]
- imgs_array = np.array(imgs_array)
- if video:
- # save png images in temporary directory
- tmp_imgs_dir = videos_dir / "tmp_images"
- save_images_concurrently(imgs_array, tmp_imgs_dir)
-
- # encode images to a mp4 video
- fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
- video_path = videos_dir / fname
- encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
-
- # clean temporary images directory
- shutil.rmtree(tmp_imgs_dir)
-
- # store the reference to the video frame
- ep_dict[img_key] = [
- {"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
- ]
- else:
- ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
-
- path_ep_dict = tmp_ep_dicts_dir.joinpath(f"ep_dict_{ep_idx:010d}.pt")
- torch.save(ep_dict, path_ep_dict)
-
- ep_dicts.append(ep_dict)
-
- data_dict = concatenate_episodes(ep_dicts)
-
- total_frames = data_dict["frame_index"].shape[0]
- data_dict["index"] = torch.arange(0, total_frames, 1)
- return data_dict
-
-
-def to_hf_dataset(data_dict, video) -> Dataset:
- features = {}
-
- for key in data_dict:
- # check if vector state obs
- if key.startswith("observation.") and "observation.images." not in key:
- features[key] = Sequence(length=data_dict[key].shape[1], feature=Value(dtype="float32", id=None))
- # check if image obs
- elif "observation.images." in key:
- if video:
- features[key] = VideoFrame()
- else:
- features[key] = Image()
-
- if "language_instruction" in data_dict:
- features["language_instruction"] = Value(dtype="string", id=None)
-
- features["action"] = Sequence(
- length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
- )
-
- features["is_terminal"] = Value(dtype="bool", id=None)
- features["is_first"] = Value(dtype="bool", id=None)
- features["discount"] = Value(dtype="float32", id=None)
-
- features["episode_index"] = Value(dtype="int64", id=None)
- features["frame_index"] = Value(dtype="int64", id=None)
- features["timestamp"] = Value(dtype="float32", id=None)
- features["next.reward"] = Value(dtype="float32", id=None)
- features["next.done"] = Value(dtype="bool", id=None)
- features["index"] = Value(dtype="int64", id=None)
-
- hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
- hf_dataset.set_transform(hf_transform_to_torch)
- return hf_dataset
-
-
-def from_raw_to_lerobot_format(
- raw_dir: Path,
- videos_dir: Path,
- fps: int | None = None,
- video: bool = True,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
- hf_dataset = to_hf_dataset(data_dict, video)
- episode_data_index = calculate_episode_data_index(hf_dataset)
- info = {
- "codebase_version": CODEBASE_VERSION,
- "fps": fps,
- "video": video,
- }
- if video:
- info["encoding"] = get_default_encoding()
-
- return hf_dataset, episode_data_index, info
diff --git a/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py b/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py
deleted file mode 100644
index 27b31ba24b..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Process zarr files formatted like in: https://github.com/real-stanford/diffusion_policy"""
-
-import shutil
-from pathlib import Path
-
-import numpy as np
-import torch
-import tqdm
-import zarr
-from datasets import Dataset, Features, Image, Sequence, Value
-from PIL import Image as PILImage
-
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-from lerobot.common.datasets.push_dataset_to_hub.utils import (
- calculate_episode_data_index,
- concatenate_episodes,
- get_default_encoding,
- save_images_concurrently,
-)
-from lerobot.common.datasets.utils import (
- hf_transform_to_torch,
-)
-from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
-
-
-def check_format(raw_dir):
- zarr_path = raw_dir / "pusht_cchi_v7_replay.zarr"
- zarr_data = zarr.open(zarr_path, mode="r")
-
- required_datasets = {
- "data/action",
- "data/img",
- "data/keypoint",
- "data/n_contacts",
- "data/state",
- "meta/episode_ends",
- }
- for dataset in required_datasets:
- assert dataset in zarr_data
- nb_frames = zarr_data["data/img"].shape[0]
-
- required_datasets.remove("meta/episode_ends")
-
- assert all(nb_frames == zarr_data[dataset].shape[0] for dataset in required_datasets)
-
-
-def load_from_raw(
- raw_dir: Path,
- videos_dir: Path,
- fps: int,
- video: bool,
- episodes: list[int] | None = None,
- keypoints_instead_of_image: bool = False,
- encoding: dict | None = None,
-):
- try:
- import pymunk
- from gym_pusht.envs.pusht import PushTEnv, pymunk_to_shapely
-
- from lerobot.common.datasets.push_dataset_to_hub._diffusion_policy_replay_buffer import (
- ReplayBuffer as DiffusionPolicyReplayBuffer,
- )
- except ModuleNotFoundError as e:
- print("`gym_pusht` is not installed. Please install it with `pip install 'lerobot[gym_pusht]'`")
- raise e
-    # as defined in the gym-pusht env: https://github.com/huggingface/gym-pusht/blob/e0684ff988d223808c0a9dcfaba9dc4991791370/gym_pusht/envs/pusht.py#L174
-    success_threshold = 0.95  # 95% coverage
-
- zarr_path = raw_dir / "pusht_cchi_v7_replay.zarr"
- zarr_data = DiffusionPolicyReplayBuffer.copy_from_path(zarr_path)
-
- episode_ids = torch.from_numpy(zarr_data.get_episode_idxs())
-    assert len(
-        {zarr_data[key].shape[0] for key in zarr_data.keys()}  # noqa: SIM118
-    ) == 1, "Some data types don't have the same number of total frames."
-
- # TODO(rcadene): verify that goal pose is expected to be fixed
- goal_pos_angle = np.array([256, 256, np.pi / 4]) # x, y, theta (in radians)
- goal_body = PushTEnv.get_goal_pose_body(goal_pos_angle)
-
- imgs = torch.from_numpy(zarr_data["img"]) # b h w c
- states = torch.from_numpy(zarr_data["state"])
- actions = torch.from_numpy(zarr_data["action"])
-
- # load data indices from which each episode starts and ends
- from_ids, to_ids = [], []
- from_idx = 0
- for to_idx in zarr_data.meta["episode_ends"]:
- from_ids.append(from_idx)
- to_ids.append(to_idx)
- from_idx = to_idx
-
- num_episodes = len(from_ids)
-
- ep_dicts = []
- ep_ids = episodes if episodes else range(num_episodes)
- for ep_idx, selected_ep_idx in tqdm.tqdm(enumerate(ep_ids)):
- from_idx = from_ids[selected_ep_idx]
- to_idx = to_ids[selected_ep_idx]
- num_frames = to_idx - from_idx
-
- # sanity check
- assert (episode_ids[from_idx:to_idx] == ep_idx).all()
-
- # get image
- if not keypoints_instead_of_image:
- image = imgs[from_idx:to_idx]
- assert image.min() >= 0.0
- assert image.max() <= 255.0
- image = image.type(torch.uint8)
-
- # get state
- state = states[from_idx:to_idx]
- agent_pos = state[:, :2]
- block_pos = state[:, 2:4]
- block_angle = state[:, 4]
-
- # get reward, success, done, and (maybe) keypoints
- reward = torch.zeros(num_frames)
- success = torch.zeros(num_frames, dtype=torch.bool)
- if keypoints_instead_of_image:
- keypoints = torch.zeros(num_frames, 16) # 8 keypoints each with 2 coords
- done = torch.zeros(num_frames, dtype=torch.bool)
- for i in range(num_frames):
- space = pymunk.Space()
- space.gravity = 0, 0
- space.damping = 0
-
- # Add walls.
- walls = [
- PushTEnv.add_segment(space, (5, 506), (5, 5), 2),
- PushTEnv.add_segment(space, (5, 5), (506, 5), 2),
- PushTEnv.add_segment(space, (506, 5), (506, 506), 2),
- PushTEnv.add_segment(space, (5, 506), (506, 506), 2),
- ]
- space.add(*walls)
-
- block_body, block_shapes = PushTEnv.add_tee(space, block_pos[i].tolist(), block_angle[i].item())
- goal_geom = pymunk_to_shapely(goal_body, block_body.shapes)
- block_geom = pymunk_to_shapely(block_body, block_body.shapes)
- intersection_area = goal_geom.intersection(block_geom).area
- goal_area = goal_geom.area
- coverage = intersection_area / goal_area
- reward[i] = np.clip(coverage / success_threshold, 0, 1)
- success[i] = coverage > success_threshold
- if keypoints_instead_of_image:
- keypoints[i] = torch.from_numpy(PushTEnv.get_keypoints(block_shapes).flatten())
-
- # last step of demonstration is considered done
- done[-1] = True
-
- ep_dict = {}
-
- if not keypoints_instead_of_image:
- imgs_array = [x.numpy() for x in image]
- img_key = "observation.image"
- if video:
- # save png images in temporary directory
- tmp_imgs_dir = videos_dir / "tmp_images"
- save_images_concurrently(imgs_array, tmp_imgs_dir)
-
- # encode images to a mp4 video
- fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
- video_path = videos_dir / fname
- encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
-
- # clean temporary images directory
- shutil.rmtree(tmp_imgs_dir)
-
- # store the reference to the video frame
- ep_dict[img_key] = [
- {"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
- ]
- else:
- ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
-
- ep_dict["observation.state"] = agent_pos
- if keypoints_instead_of_image:
- ep_dict["observation.environment_state"] = keypoints
- ep_dict["action"] = actions[from_idx:to_idx]
- ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64)
- ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
- ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
- # ep_dict["next.observation.image"] = image[1:],
- # ep_dict["next.observation.state"] = agent_pos[1:],
-        # TODO(rcadene): verify that reward and done are aligned with image and agent_pos
- ep_dict["next.reward"] = torch.cat([reward[1:], reward[[-1]]])
- ep_dict["next.done"] = torch.cat([done[1:], done[[-1]]])
- ep_dict["next.success"] = torch.cat([success[1:], success[[-1]]])
- ep_dicts.append(ep_dict)
- data_dict = concatenate_episodes(ep_dicts)
-
- total_frames = data_dict["frame_index"].shape[0]
- data_dict["index"] = torch.arange(0, total_frames, 1)
- return data_dict
-
-
-def to_hf_dataset(data_dict, video, keypoints_instead_of_image: bool = False):
- features = {}
-
- if not keypoints_instead_of_image:
- if video:
- features["observation.image"] = VideoFrame()
- else:
- features["observation.image"] = Image()
-
- features["observation.state"] = Sequence(
- length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
- )
- if keypoints_instead_of_image:
- features["observation.environment_state"] = Sequence(
- length=data_dict["observation.environment_state"].shape[1],
- feature=Value(dtype="float32", id=None),
- )
- features["action"] = Sequence(
- length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["episode_index"] = Value(dtype="int64", id=None)
- features["frame_index"] = Value(dtype="int64", id=None)
- features["timestamp"] = Value(dtype="float32", id=None)
- features["next.reward"] = Value(dtype="float32", id=None)
- features["next.done"] = Value(dtype="bool", id=None)
- features["next.success"] = Value(dtype="bool", id=None)
- features["index"] = Value(dtype="int64", id=None)
-
- hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
- hf_dataset.set_transform(hf_transform_to_torch)
- return hf_dataset
-
-
-def from_raw_to_lerobot_format(
- raw_dir: Path,
- videos_dir: Path,
- fps: int | None = None,
- video: bool = True,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- # Manually change this to True to use keypoints of the T instead of an image observation (but don't merge
- # with True). Also make sure to use video = 0 in the `push_dataset_to_hub.py` script.
- keypoints_instead_of_image = False
-
- # sanity check
- check_format(raw_dir)
-
- if fps is None:
- fps = 10
-
- data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, keypoints_instead_of_image, encoding)
- hf_dataset = to_hf_dataset(data_dict, video, keypoints_instead_of_image)
- episode_data_index = calculate_episode_data_index(hf_dataset)
- info = {
- "codebase_version": CODEBASE_VERSION,
- "fps": fps,
- "video": video if not keypoints_instead_of_image else 0,
- }
- if video:
- info["encoding"] = get_default_encoding()
-
- return hf_dataset, episode_data_index, info
diff --git a/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py b/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py
deleted file mode 100644
index fec893a7f1..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Process UMI (Universal Manipulation Interface) data stored in Zarr format like in: https://github.com/real-stanford/universal_manipulation_interface"""
-
-import logging
-import shutil
-from pathlib import Path
-
-import torch
-import tqdm
-import zarr
-from datasets import Dataset, Features, Image, Sequence, Value
-from PIL import Image as PILImage
-
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-from lerobot.common.datasets.push_dataset_to_hub._umi_imagecodecs_numcodecs import register_codecs
-from lerobot.common.datasets.push_dataset_to_hub.utils import (
- calculate_episode_data_index,
- concatenate_episodes,
- get_default_encoding,
- save_images_concurrently,
-)
-from lerobot.common.datasets.utils import (
- hf_transform_to_torch,
-)
-from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
-
-
-def check_format(raw_dir) -> bool:
- zarr_path = raw_dir / "cup_in_the_wild.zarr"
- zarr_data = zarr.open(zarr_path, mode="r")
-
- required_datasets = {
- "data/robot0_demo_end_pose",
- "data/robot0_demo_start_pose",
- "data/robot0_eef_pos",
- "data/robot0_eef_rot_axis_angle",
- "data/robot0_gripper_width",
- "meta/episode_ends",
- "data/camera0_rgb",
- }
- for dataset in required_datasets:
- if dataset not in zarr_data:
- return False
-
-    # registering the codecs is mandatory to access zarr_data
- register_codecs()
- nb_frames = zarr_data["data/camera0_rgb"].shape[0]
-
- required_datasets.remove("meta/episode_ends")
- assert all(nb_frames == zarr_data[dataset].shape[0] for dataset in required_datasets)
-
-
-def load_from_raw(
- raw_dir: Path,
- videos_dir: Path,
- fps: int,
- video: bool,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- zarr_path = raw_dir / "cup_in_the_wild.zarr"
- zarr_data = zarr.open(zarr_path, mode="r")
-
- # We process the image data separately because it is too large to fit in memory
- end_pose = torch.from_numpy(zarr_data["data/robot0_demo_end_pose"][:])
- start_pos = torch.from_numpy(zarr_data["data/robot0_demo_start_pose"][:])
- eff_pos = torch.from_numpy(zarr_data["data/robot0_eef_pos"][:])
- eff_rot_axis_angle = torch.from_numpy(zarr_data["data/robot0_eef_rot_axis_angle"][:])
- gripper_width = torch.from_numpy(zarr_data["data/robot0_gripper_width"][:])
-
- states_pos = torch.cat([eff_pos, eff_rot_axis_angle], dim=1)
- states = torch.cat([states_pos, gripper_width], dim=1)
-
- episode_ends = zarr_data["meta/episode_ends"][:]
- num_episodes = episode_ends.shape[0]
-
-    # We convert it to a torch tensor later because the jit function does not support torch tensors
- episode_ends = torch.from_numpy(episode_ends)
-
- # load data indices from which each episode starts and ends
- from_ids, to_ids = [], []
- from_idx = 0
- for to_idx in episode_ends:
- from_ids.append(from_idx)
- to_ids.append(to_idx)
- from_idx = to_idx
-
- ep_dicts_dir = videos_dir / "ep_dicts"
- ep_dicts_dir.mkdir(exist_ok=True, parents=True)
- ep_dicts = []
-
- ep_ids = episodes if episodes else range(num_episodes)
- for ep_idx, selected_ep_idx in tqdm.tqdm(enumerate(ep_ids)):
- ep_dict_path = ep_dicts_dir / f"{ep_idx}"
- if not ep_dict_path.is_file():
- from_idx = from_ids[selected_ep_idx]
- to_idx = to_ids[selected_ep_idx]
- num_frames = to_idx - from_idx
-
- # TODO(rcadene): save temporary images of the episode?
-
- state = states[from_idx:to_idx]
-
- ep_dict = {}
-
- # load 57MB of images in RAM (400x224x224x3 uint8)
- imgs_array = zarr_data["data/camera0_rgb"][from_idx:to_idx]
- img_key = "observation.image"
- if video:
- fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
- video_path = videos_dir / fname
- if not video_path.is_file():
- # save png images in temporary directory
- tmp_imgs_dir = videos_dir / "tmp_images"
- save_images_concurrently(imgs_array, tmp_imgs_dir)
-
- # encode images to a mp4 video
- encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
-
- # clean temporary images directory
- shutil.rmtree(tmp_imgs_dir)
-
- # store the reference to the video frame
- ep_dict[img_key] = [
- {"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
- ]
- else:
- ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
-
- ep_dict["observation.state"] = state
- ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64)
- ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
- ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
- ep_dict["episode_data_index_from"] = torch.tensor([from_idx] * num_frames)
- ep_dict["episode_data_index_to"] = torch.tensor([from_idx + num_frames] * num_frames)
- ep_dict["end_pose"] = end_pose[from_idx:to_idx]
- ep_dict["start_pos"] = start_pos[from_idx:to_idx]
- ep_dict["gripper_width"] = gripper_width[from_idx:to_idx]
- torch.save(ep_dict, ep_dict_path)
- else:
- ep_dict = torch.load(ep_dict_path)
-
- ep_dicts.append(ep_dict)
-
- data_dict = concatenate_episodes(ep_dicts)
-
- total_frames = data_dict["frame_index"].shape[0]
- data_dict["index"] = torch.arange(0, total_frames, 1)
- return data_dict
-
-
-def to_hf_dataset(data_dict, video):
- features = {}
-
- if video:
- features["observation.image"] = VideoFrame()
- else:
- features["observation.image"] = Image()
-
- features["observation.state"] = Sequence(
- length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["episode_index"] = Value(dtype="int64", id=None)
- features["frame_index"] = Value(dtype="int64", id=None)
- features["timestamp"] = Value(dtype="float32", id=None)
- features["index"] = Value(dtype="int64", id=None)
- features["episode_data_index_from"] = Value(dtype="int64", id=None)
- features["episode_data_index_to"] = Value(dtype="int64", id=None)
- # `start_pos` and `end_pos` respectively represent the positions of the end-effector
- # at the beginning and the end of the episode.
- # `gripper_width` indicates the distance between the grippers, and this value is included
- # in the state vector, which comprises the concatenation of the end-effector position
- # and gripper width.
- features["end_pose"] = Sequence(
- length=data_dict["end_pose"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["start_pos"] = Sequence(
- length=data_dict["start_pos"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["gripper_width"] = Sequence(
- length=data_dict["gripper_width"].shape[1], feature=Value(dtype="float32", id=None)
- )
-
- hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
- hf_dataset.set_transform(hf_transform_to_torch)
- return hf_dataset
-
-
-def from_raw_to_lerobot_format(
- raw_dir: Path,
- videos_dir: Path,
- fps: int | None = None,
- video: bool = True,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- # sanity check
- check_format(raw_dir)
-
- if fps is None:
- # For umi cup in the wild: https://arxiv.org/pdf/2402.10329#table.caption.16
- fps = 10
-
- if not video:
- logging.warning(
- "Generating UMI dataset without `video=True` creates ~150GB on disk and requires ~80GB in RAM."
- )
-
- data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
- hf_dataset = to_hf_dataset(data_dict, video)
- episode_data_index = calculate_episode_data_index(hf_dataset)
- info = {
- "codebase_version": CODEBASE_VERSION,
- "fps": fps,
- "video": video,
- }
- if video:
- info["encoding"] = get_default_encoding()
-
- return hf_dataset, episode_data_index, info
diff --git a/lerobot/common/datasets/push_dataset_to_hub/utils.py b/lerobot/common/datasets/push_dataset_to_hub/utils.py
deleted file mode 100644
index ebcf87f77f..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/utils.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import inspect
-from concurrent.futures import ThreadPoolExecutor
-from pathlib import Path
-from typing import Dict
-
-import datasets
-import numpy
-import PIL
-import torch
-
-from lerobot.common.datasets.video_utils import encode_video_frames
-
-
-def concatenate_episodes(ep_dicts):
- data_dict = {}
-
- keys = ep_dicts[0].keys()
- for key in keys:
- if torch.is_tensor(ep_dicts[0][key][0]):
- data_dict[key] = torch.cat([ep_dict[key] for ep_dict in ep_dicts])
- else:
- if key not in data_dict:
- data_dict[key] = []
- for ep_dict in ep_dicts:
- for x in ep_dict[key]:
- data_dict[key].append(x)
-
- total_frames = data_dict["frame_index"].shape[0]
- data_dict["index"] = torch.arange(0, total_frames, 1)
- return data_dict
-
-
-def save_images_concurrently(imgs_array: numpy.ndarray, out_dir: Path, max_workers: int = 4):
- out_dir = Path(out_dir)
- out_dir.mkdir(parents=True, exist_ok=True)
-
- def save_image(img_array, i, out_dir):
- img = PIL.Image.fromarray(img_array)
- img.save(str(out_dir / f"frame_{i:06d}.png"), quality=100)
-
- num_images = len(imgs_array)
- with ThreadPoolExecutor(max_workers=max_workers) as executor:
- [executor.submit(save_image, imgs_array[i], i, out_dir) for i in range(num_images)]
-
-
-def get_default_encoding() -> dict:
- """Returns the default ffmpeg encoding parameters used by `encode_video_frames`."""
- signature = inspect.signature(encode_video_frames)
- return {
- k: v.default
- for k, v in signature.parameters.items()
- if v.default is not inspect.Parameter.empty and k in ["vcodec", "pix_fmt", "g", "crf"]
- }
-
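-
-# A minimal, hypothetical sketch (not part of the original file) showing the same
-# introspection pattern on a stand-in function, without asserting the real ffmpeg defaults.
-def _example_defaults_from_signature() -> dict:
-    def _encode(vcodec: str = "libx264", crf: int = 23, verbose: bool = False):
-        ...
-
-    signature = inspect.signature(_encode)
-    # -> {"vcodec": "libx264", "crf": 23, "verbose": False}
-    return {
-        k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty
-    }
-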
-
-def check_repo_id(repo_id: str) -> None:
- if len(repo_id.split("/")) != 2:
- raise ValueError(
- f"""`repo_id` is expected to contain a community or user id `/` the name of the dataset
- (e.g. 'lerobot/pusht'), but contains '{repo_id}'."""
- )
-
-
-# TODO(aliberts): remove
-def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]:
- """
- Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset.
-
- Parameters:
- - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index.
-
- Returns:
- - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys:
- - "from": A tensor containing the starting index of each episode.
- - "to": A tensor containing the ending index of each episode.
- """
- episode_data_index = {"from": [], "to": []}
-
- current_episode = None
- """
- The episode_index is a list of integers, each representing the episode index of the corresponding example.
- For instance, the following is a valid episode_index:
- [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]
-
- Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and
- ending index of each episode. For the episode_index above, the episode_data_index dictionary will look like this:
- {
- "from": [0, 3, 7],
- "to": [3, 7, 12]
- }
- """
- if len(hf_dataset) == 0:
- episode_data_index = {
- "from": torch.tensor([]),
- "to": torch.tensor([]),
- }
- return episode_data_index
- for idx, episode_idx in enumerate(hf_dataset["episode_index"]):
- if episode_idx != current_episode:
- # We encountered a new episode, so we append its starting location to the "from" list
- episode_data_index["from"].append(idx)
- # If this is not the first episode, we append the ending location of the previous episode to the "to" list
- if current_episode is not None:
- episode_data_index["to"].append(idx)
- # Let's keep track of the current episode index
- current_episode = episode_idx
- else:
- # We are still in the same episode, so there is nothing for us to do here
- pass
- # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list
- episode_data_index["to"].append(idx + 1)
-
- for k in ["from", "to"]:
- episode_data_index[k] = torch.tensor(episode_data_index[k])
-
- return episode_data_index
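-
-
-if __name__ == "__main__":
-    # Hypothetical demo (not part of the original module): reproduces the episode_index example
-    # from the docstring above on a toy dataset built with `datasets.Dataset.from_dict`.
-    toy_dataset = datasets.Dataset.from_dict({"episode_index": [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]})
-    toy_index = calculate_episode_data_index(toy_dataset)
-    # Expected: {"from": tensor([0, 3, 7]), "to": tensor([3, 7, 12])}
-    print(toy_index)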
diff --git a/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py b/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py
deleted file mode 100644
index 0047e48c30..0000000000
--- a/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Process pickle files formatted like in: https://github.com/fyhMer/fowm"""
-
-import pickle
-import shutil
-from pathlib import Path
-
-import einops
-import torch
-import tqdm
-from datasets import Dataset, Features, Image, Sequence, Value
-from PIL import Image as PILImage
-
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
-from lerobot.common.datasets.push_dataset_to_hub.utils import (
- calculate_episode_data_index,
- concatenate_episodes,
- get_default_encoding,
- save_images_concurrently,
-)
-from lerobot.common.datasets.utils import (
- hf_transform_to_torch,
-)
-from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
-
-
-def check_format(raw_dir):
- keys = {"actions", "rewards", "dones"}
- nested_keys = {"observations": {"rgb", "state"}, "next_observations": {"rgb", "state"}}
-
- xarm_files = list(raw_dir.glob("*.pkl"))
- assert len(xarm_files) > 0
-
- with open(xarm_files[0], "rb") as f:
- dataset_dict = pickle.load(f)
-
- assert isinstance(dataset_dict, dict)
- assert all(k in dataset_dict for k in keys)
-
- # Check for consistent lengths in nested keys
- expected_len = len(dataset_dict["actions"])
- assert all(len(dataset_dict[key]) == expected_len for key in keys if key in dataset_dict)
-
- for key, subkeys in nested_keys.items():
- nested_dict = dataset_dict.get(key, {})
- assert all(len(nested_dict[subkey]) == expected_len for subkey in subkeys if subkey in nested_dict)
-
-
-def load_from_raw(
- raw_dir: Path,
- videos_dir: Path,
- fps: int,
- video: bool,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- pkl_path = raw_dir / "buffer.pkl"
-
- with open(pkl_path, "rb") as f:
- pkl_data = pickle.load(f)
-
- # load data indices from which each episode starts and ends
- from_ids, to_ids = [], []
- from_idx, to_idx = 0, 0
- for done in pkl_data["dones"]:
- to_idx += 1
- if not done:
- continue
- from_ids.append(from_idx)
- to_ids.append(to_idx)
- from_idx = to_idx
-
- num_episodes = len(from_ids)
-
- ep_dicts = []
- ep_ids = episodes if episodes else range(num_episodes)
- for ep_idx, selected_ep_idx in tqdm.tqdm(enumerate(ep_ids)):
- from_idx = from_ids[selected_ep_idx]
- to_idx = to_ids[selected_ep_idx]
- num_frames = to_idx - from_idx
-
- image = torch.tensor(pkl_data["observations"]["rgb"][from_idx:to_idx])
- image = einops.rearrange(image, "b c h w -> b h w c")
- state = torch.tensor(pkl_data["observations"]["state"][from_idx:to_idx])
- action = torch.tensor(pkl_data["actions"][from_idx:to_idx])
- # TODO(rcadene): we have a missing last frame which is the observation when the env is done
- # it is critical to have this frame for tdmpc to predict a "done observation/state"
- # next_image = torch.tensor(pkl_data["next_observations"]["rgb"][from_idx:to_idx])
- # next_state = torch.tensor(pkl_data["next_observations"]["state"][from_idx:to_idx])
- next_reward = torch.tensor(pkl_data["rewards"][from_idx:to_idx])
- next_done = torch.tensor(pkl_data["dones"][from_idx:to_idx])
-
- ep_dict = {}
-
- imgs_array = [x.numpy() for x in image]
- img_key = "observation.image"
- if video:
- # save png images in temporary directory
- tmp_imgs_dir = videos_dir / "tmp_images"
- save_images_concurrently(imgs_array, tmp_imgs_dir)
-
- # encode images to a mp4 video
- fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
- video_path = videos_dir / fname
- encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
-
- # clean temporary images directory
- shutil.rmtree(tmp_imgs_dir)
-
- # store the reference to the video frame
- ep_dict[img_key] = [{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)]
- else:
- ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
-
- ep_dict["observation.state"] = state
- ep_dict["action"] = action
- ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64)
- ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
- ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
- # ep_dict["next.observation.image"] = next_image
- # ep_dict["next.observation.state"] = next_state
- ep_dict["next.reward"] = next_reward
- ep_dict["next.done"] = next_done
- ep_dicts.append(ep_dict)
-
- data_dict = concatenate_episodes(ep_dicts)
-
- total_frames = data_dict["frame_index"].shape[0]
- data_dict["index"] = torch.arange(0, total_frames, 1)
- return data_dict
-
-
-def to_hf_dataset(data_dict, video):
- features = {}
-
- if video:
- features["observation.image"] = VideoFrame()
- else:
- features["observation.image"] = Image()
-
- features["observation.state"] = Sequence(
- length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["action"] = Sequence(
- length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
- )
- features["episode_index"] = Value(dtype="int64", id=None)
- features["frame_index"] = Value(dtype="int64", id=None)
- features["timestamp"] = Value(dtype="float32", id=None)
- features["next.reward"] = Value(dtype="float32", id=None)
- features["next.done"] = Value(dtype="bool", id=None)
- features["index"] = Value(dtype="int64", id=None)
- # TODO(rcadene): add success
- # features["next.success"] = Value(dtype='bool', id=None)
-
- hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
- hf_dataset.set_transform(hf_transform_to_torch)
- return hf_dataset
-
-
-def from_raw_to_lerobot_format(
- raw_dir: Path,
- videos_dir: Path,
- fps: int | None = None,
- video: bool = True,
- episodes: list[int] | None = None,
- encoding: dict | None = None,
-):
- # sanity check
- check_format(raw_dir)
-
- if fps is None:
- fps = 15
-
- data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
- hf_dataset = to_hf_dataset(data_dict, video)
- episode_data_index = calculate_episode_data_index(hf_dataset)
- info = {
- "codebase_version": CODEBASE_VERSION,
- "fps": fps,
- "video": video,
- }
- if video:
- info["encoding"] = get_default_encoding()
-
- return hf_dataset, episode_data_index, info
diff --git a/lerobot/common/datasets/transforms.py b/lerobot/common/datasets/transforms.py
deleted file mode 100644
index 899f0d66c9..0000000000
--- a/lerobot/common/datasets/transforms.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import collections
-from typing import Any, Callable, Dict, Sequence
-
-import torch
-from torchvision.transforms import v2
-from torchvision.transforms.v2 import Transform
-from torchvision.transforms.v2 import functional as F # noqa: N812
-
-
-class RandomSubsetApply(Transform):
- """Apply a random subset of N transformations from a list of transformations.
-
- Args:
- transforms: list of transformations.
- p: represents the multinomial probabilities (with no replacement) used for sampling the transform.
- If the sum of the weights is not 1, they will be normalized. If ``None`` (default), all transforms
- have the same probability.
- n_subset: number of transformations to apply. If ``None``, all transforms are applied.
- Must be in [1, len(transforms)].
- random_order: apply transformations in a random order.
- """
-
- def __init__(
- self,
- transforms: Sequence[Callable],
- p: list[float] | None = None,
- n_subset: int | None = None,
- random_order: bool = False,
- ) -> None:
- super().__init__()
- if not isinstance(transforms, Sequence):
- raise TypeError("Argument transforms should be a sequence of callables")
- if p is None:
- p = [1] * len(transforms)
- elif len(p) != len(transforms):
- raise ValueError(
- f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}"
- )
-
- if n_subset is None:
- n_subset = len(transforms)
- elif not isinstance(n_subset, int):
- raise TypeError("n_subset should be an int or None")
- elif not (1 <= n_subset <= len(transforms)):
- raise ValueError(f"n_subset should be in the interval [1, {len(transforms)}]")
-
- self.transforms = transforms
- total = sum(p)
- self.p = [prob / total for prob in p]
- self.n_subset = n_subset
- self.random_order = random_order
-
- def forward(self, *inputs: Any) -> Any:
- needs_unpacking = len(inputs) > 1
-
- selected_indices = torch.multinomial(torch.tensor(self.p), self.n_subset)
- if not self.random_order:
- selected_indices = selected_indices.sort().values
-
- selected_transforms = [self.transforms[i] for i in selected_indices]
-
- for transform in selected_transforms:
- outputs = transform(*inputs)
- inputs = outputs if needs_unpacking else (outputs,)
-
- return outputs
-
- def extra_repr(self) -> str:
- return (
- f"transforms={self.transforms}, "
- f"p={self.p}, "
- f"n_subset={self.n_subset}, "
- f"random_order={self.random_order}"
- )
-
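-
-# A minimal, hypothetical usage sketch (not part of the original file): apply exactly one of
-# two color jitters per call, sampled with weights 0.7 / 0.3 (normalized internally).
-def _example_random_subset_apply() -> torch.Tensor:
-    transform = RandomSubsetApply(
-        transforms=[v2.ColorJitter(brightness=(0.5, 1.5)), v2.ColorJitter(contrast=(0.5, 1.5))],
-        p=[0.7, 0.3],
-        n_subset=1,
-    )
-    img = torch.rand(3, 224, 224)  # dummy CHW image in [0, 1]
-    return transform(img)
-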
-
-class SharpnessJitter(Transform):
- """Randomly change the sharpness of an image or video.
-
- Similar to a v2.RandomAdjustSharpness with p=1 and a sharpness_factor sampled randomly.
-    While v2.RandomAdjustSharpness applies, with a given probability, a fixed sharpness_factor to an image,
-    SharpnessJitter applies a random sharpness_factor each time. This yields a more diverse set of
-    augmentations.
-
- A sharpness_factor of 0 gives a blurred image, 1 gives the original image while 2 increases the sharpness
- by a factor of 2.
-
- If the input is a :class:`torch.Tensor`,
- it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
-
- Args:
- sharpness: How much to jitter sharpness. sharpness_factor is chosen uniformly from
- [max(0, 1 - sharpness), 1 + sharpness] or the given
- [min, max]. Should be non negative numbers.
- """
-
- def __init__(self, sharpness: float | Sequence[float]) -> None:
- super().__init__()
- self.sharpness = self._check_input(sharpness)
-
- def _check_input(self, sharpness):
- if isinstance(sharpness, (int, float)):
- if sharpness < 0:
- raise ValueError("If sharpness is a single number, it must be non negative.")
- sharpness = [1.0 - sharpness, 1.0 + sharpness]
- sharpness[0] = max(sharpness[0], 0.0)
- elif isinstance(sharpness, collections.abc.Sequence) and len(sharpness) == 2:
- sharpness = [float(v) for v in sharpness]
- else:
- raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.")
-
- if not 0.0 <= sharpness[0] <= sharpness[1]:
- raise ValueError(f"sharpnesss values should be between (0., inf), but got {sharpness}.")
-
- return float(sharpness[0]), float(sharpness[1])
-
- def _generate_value(self, left: float, right: float) -> float:
- return torch.empty(1).uniform_(left, right).item()
-
- def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
- sharpness_factor = self._generate_value(self.sharpness[0], self.sharpness[1])
- return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor)
-
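-
-# A minimal, hypothetical usage sketch (not part of the original file): sample a sharpness_factor
-# uniformly in [0.5, 1.5] and apply it to a dummy image tensor.
-def _example_sharpness_jitter() -> torch.Tensor:
-    jitter = SharpnessJitter(sharpness=0.5)  # equivalent to the range (0.5, 1.5)
-    img = torch.rand(3, 224, 224)
-    return jitter(img)
-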
-
-def get_image_transforms(
- brightness_weight: float = 1.0,
- brightness_min_max: tuple[float, float] | None = None,
- contrast_weight: float = 1.0,
- contrast_min_max: tuple[float, float] | None = None,
- saturation_weight: float = 1.0,
- saturation_min_max: tuple[float, float] | None = None,
- hue_weight: float = 1.0,
- hue_min_max: tuple[float, float] | None = None,
- sharpness_weight: float = 1.0,
- sharpness_min_max: tuple[float, float] | None = None,
- max_num_transforms: int | None = None,
- random_order: bool = False,
-):
- def check_value(name, weight, min_max):
- if min_max is not None:
- if len(min_max) != 2:
- raise ValueError(
- f"`{name}_min_max` is expected to be a tuple of 2 dimensions, but {min_max} provided."
- )
- if weight < 0.0:
- raise ValueError(
- f"`{name}_weight` is expected to be 0 or positive, but is negative ({weight})."
- )
-
- check_value("brightness", brightness_weight, brightness_min_max)
- check_value("contrast", contrast_weight, contrast_min_max)
- check_value("saturation", saturation_weight, saturation_min_max)
- check_value("hue", hue_weight, hue_min_max)
- check_value("sharpness", sharpness_weight, sharpness_min_max)
-
- weights = []
- transforms = []
- if brightness_min_max is not None and brightness_weight > 0.0:
- weights.append(brightness_weight)
- transforms.append(v2.ColorJitter(brightness=brightness_min_max))
- if contrast_min_max is not None and contrast_weight > 0.0:
- weights.append(contrast_weight)
- transforms.append(v2.ColorJitter(contrast=contrast_min_max))
- if saturation_min_max is not None and saturation_weight > 0.0:
- weights.append(saturation_weight)
- transforms.append(v2.ColorJitter(saturation=saturation_min_max))
- if hue_min_max is not None and hue_weight > 0.0:
- weights.append(hue_weight)
- transforms.append(v2.ColorJitter(hue=hue_min_max))
- if sharpness_min_max is not None and sharpness_weight > 0.0:
- weights.append(sharpness_weight)
- transforms.append(SharpnessJitter(sharpness=sharpness_min_max))
-
- n_subset = len(transforms)
- if max_num_transforms is not None:
- n_subset = min(n_subset, max_num_transforms)
-
- if n_subset == 0:
- return v2.Identity()
- else:
- # TODO(rcadene, aliberts): add v2.ToDtype float16?
- return RandomSubsetApply(transforms, p=weights, n_subset=n_subset, random_order=random_order)
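-
-
-# A minimal, hypothetical usage sketch (not part of the original file): enable only brightness
-# and sharpness jitter, applying at most one of them per call.
-def _example_get_image_transforms() -> torch.Tensor:
-    transform = get_image_transforms(
-        brightness_min_max=(0.8, 1.2),
-        sharpness_min_max=(0.5, 1.5),
-        max_num_transforms=1,
-    )
-    img = torch.rand(3, 224, 224)
-    return transform(img)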
diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py
deleted file mode 100644
index 123c5960f5..0000000000
--- a/lerobot/common/datasets/utils.py
+++ /dev/null
@@ -1,562 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import importlib.resources
-import json
-import logging
-import textwrap
-from collections.abc import Iterator
-from itertools import accumulate
-from pathlib import Path
-from pprint import pformat
-from types import SimpleNamespace
-from typing import Any
-
-import datasets
-import jsonlines
-import numpy as np
-import pyarrow.compute as pc
-import torch
-from datasets.table import embed_table_storage
-from huggingface_hub import DatasetCard, DatasetCardData, HfApi
-from PIL import Image as PILImage
-from torchvision import transforms
-
-from lerobot.common.robot_devices.robots.utils import Robot
-
-DEFAULT_CHUNK_SIZE = 1000 # Max number of episodes per chunk
-
-INFO_PATH = "meta/info.json"
-EPISODES_PATH = "meta/episodes.jsonl"
-STATS_PATH = "meta/stats.json"
-TASKS_PATH = "meta/tasks.jsonl"
-
-DEFAULT_VIDEO_PATH = "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4"
-DEFAULT_PARQUET_PATH = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet"
-DEFAULT_IMAGE_PATH = "images/{image_key}/episode_{episode_index:06d}/frame_{frame_index:06d}.png"
-
-DATASET_CARD_TEMPLATE = """
----
-# Metadata will go there
----
-This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
-
-## {}
-
-"""
-
-DEFAULT_FEATURES = {
- "timestamp": {"dtype": "float32", "shape": (1,), "names": None},
- "frame_index": {"dtype": "int64", "shape": (1,), "names": None},
- "episode_index": {"dtype": "int64", "shape": (1,), "names": None},
- "index": {"dtype": "int64", "shape": (1,), "names": None},
- "task_index": {"dtype": "int64", "shape": (1,), "names": None},
-}
-
-
-def flatten_dict(d: dict, parent_key: str = "", sep: str = "/") -> dict:
- """Flatten a nested dictionary structure by collapsing nested keys into one key with a separator.
-
- For example:
- ```
- >>> dct = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}`
- >>> print(flatten_dict(dct))
- {"a/b": 1, "a/c/d": 2, "e": 3}
- """
- items = []
- for k, v in d.items():
- new_key = f"{parent_key}{sep}{k}" if parent_key else k
- if isinstance(v, dict):
- items.extend(flatten_dict(v, new_key, sep=sep).items())
- else:
- items.append((new_key, v))
- return dict(items)
-
-
-def unflatten_dict(d: dict, sep: str = "/") -> dict:
- outdict = {}
- for key, value in d.items():
- parts = key.split(sep)
- d = outdict
- for part in parts[:-1]:
- if part not in d:
- d[part] = {}
- d = d[part]
- d[parts[-1]] = value
- return outdict
-
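-
-# A minimal, hypothetical sketch (not part of the original file): `unflatten_dict` inverts
-# `flatten_dict` for the same separator.
-def _example_flatten_unflatten_roundtrip() -> bool:
-    nested = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
-    flat = flatten_dict(nested)  # {"a/b": 1, "a/c/d": 2, "e": 3}
-    return unflatten_dict(flat) == nested  # True
-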
-
-def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict:
- serialized_dict = {key: value.tolist() for key, value in flatten_dict(stats).items()}
- return unflatten_dict(serialized_dict)
-
-
-def write_parquet(dataset: datasets.Dataset, fpath: Path) -> None:
- # Embed image bytes into the table before saving to parquet
- format = dataset.format
- dataset = dataset.with_format("arrow")
- dataset = dataset.map(embed_table_storage, batched=False)
- dataset = dataset.with_format(**format)
- dataset.to_parquet(fpath)
-
-
-def load_json(fpath: Path) -> Any:
- with open(fpath) as f:
- return json.load(f)
-
-
-def write_json(data: dict, fpath: Path) -> None:
- fpath.parent.mkdir(exist_ok=True, parents=True)
- with open(fpath, "w") as f:
- json.dump(data, f, indent=4, ensure_ascii=False)
-
-
-def load_jsonlines(fpath: Path) -> list[Any]:
- with jsonlines.open(fpath, "r") as reader:
- return list(reader)
-
-
-def write_jsonlines(data: dict, fpath: Path) -> None:
- fpath.parent.mkdir(exist_ok=True, parents=True)
- with jsonlines.open(fpath, "w") as writer:
- writer.write_all(data)
-
-
-def append_jsonlines(data: dict, fpath: Path) -> None:
- fpath.parent.mkdir(exist_ok=True, parents=True)
- with jsonlines.open(fpath, "a") as writer:
- writer.write(data)
-
-
-def load_info(local_dir: Path) -> dict:
- info = load_json(local_dir / INFO_PATH)
- for ft in info["features"].values():
- ft["shape"] = tuple(ft["shape"])
- return info
-
-
-def load_stats(local_dir: Path) -> dict:
- if not (local_dir / STATS_PATH).exists():
- return None
- stats = load_json(local_dir / STATS_PATH)
- stats = {key: torch.tensor(value) for key, value in flatten_dict(stats).items()}
- return unflatten_dict(stats)
-
-
-def load_tasks(local_dir: Path) -> dict:
- tasks = load_jsonlines(local_dir / TASKS_PATH)
- return {item["task_index"]: item["task"] for item in sorted(tasks, key=lambda x: x["task_index"])}
-
-
-def load_episodes(local_dir: Path) -> dict:
- return load_jsonlines(local_dir / EPISODES_PATH)
-
-
-def load_image_as_numpy(fpath: str | Path, dtype="float32", channel_first: bool = True) -> np.ndarray:
- img = PILImage.open(fpath).convert("RGB")
- img_array = np.array(img, dtype=dtype)
- if channel_first: # (H, W, C) -> (C, H, W)
- img_array = np.transpose(img_array, (2, 0, 1))
- if "float" in dtype:
- img_array /= 255.0
- return img_array
-
-
-def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]):
- """Get a transform function that convert items from Hugging Face dataset (pyarrow)
- to torch tensors. Importantly, images are converted from PIL, which corresponds to
- a channel last representation (h w c) of uint8 type, to a torch image representation
- with channel first (c h w) of float32 type in range [0,1].
- """
- for key in items_dict:
- first_item = items_dict[key][0]
- if isinstance(first_item, PILImage.Image):
- to_tensor = transforms.ToTensor()
- items_dict[key] = [to_tensor(img) for img in items_dict[key]]
- elif first_item is None:
- pass
- else:
- items_dict[key] = [torch.tensor(x) for x in items_dict[key]]
- return items_dict
-
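-
-# A minimal, hypothetical sketch (not part of the original file): PIL images become float32
-# CHW tensors in [0, 1]; everything else becomes a torch tensor.
-def _example_hf_transform_to_torch() -> torch.Size:
-    items = {
-        "observation.image": [PILImage.new("RGB", (64, 48))],  # PIL size is (width, height)
-        "action": [[0.1, 0.2]],
-    }
-    items = hf_transform_to_torch(items)
-    return items["observation.image"][0].shape  # torch.Size([3, 48, 64])
-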
-
-def _get_major_minor(version: str) -> tuple[int, int]:
- split = version.strip("v").split(".")
- return int(split[0]), int(split[1])
-
-
-class BackwardCompatibilityError(Exception):
- def __init__(self, repo_id, version):
- message = textwrap.dedent(f"""
- BackwardCompatibilityError: The dataset you requested ({repo_id}) is in {version} format.
-
- We introduced a new format since v2.0 which is not backward compatible with v1.x.
- Please, use our conversion script. Modify the following command with your own task description:
- ```
- python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \\
- --repo-id {repo_id} \\
- --single-task "TASK DESCRIPTION." # <---- /!\\ Replace TASK DESCRIPTION /!\\
- ```
-
- A few examples to replace TASK DESCRIPTION: "Pick up the blue cube and place it into the bin.",
- "Insert the peg into the socket.", "Slide open the ziploc bag.", "Take the elevator to the 1st floor.",
- "Open the top cabinet, store the pot inside it then close the cabinet.", "Push the T-shaped block onto the T-shaped target.",
- "Grab the spray paint on the shelf and place it in the bin on top of the robot dog.", "Fold the sweatshirt.", ...
-
- If you encounter a problem, contact LeRobot maintainers on [Discord](https://discord.com/invite/s3KuuzsPFb)
- or open an [issue on GitHub](https://github.com/huggingface/lerobot/issues/new/choose).
- """)
- super().__init__(message)
-
-
-def check_version_compatibility(
- repo_id: str, version_to_check: str, current_version: str, enforce_breaking_major: bool = True
-) -> None:
- current_major, _ = _get_major_minor(current_version)
- major_to_check, _ = _get_major_minor(version_to_check)
- if major_to_check < current_major and enforce_breaking_major:
- raise BackwardCompatibilityError(repo_id, version_to_check)
- elif float(version_to_check.strip("v")) < float(current_version.strip("v")):
- logging.warning(
- f"""The dataset you requested ({repo_id}) was created with a previous version ({version_to_check}) of the
- codebase. The current codebase version is {current_version}. You should be fine since
- backward compatibility is maintained. If you encounter a problem, contact LeRobot maintainers on
- Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.""",
- )
-
-
-def get_hub_safe_version(repo_id: str, version: str) -> str:
- api = HfApi()
- dataset_info = api.list_repo_refs(repo_id, repo_type="dataset")
- branches = [b.name for b in dataset_info.branches]
- if version not in branches:
- num_version = float(version.strip("v"))
- hub_num_versions = [float(v.strip("v")) for v in branches if v.startswith("v")]
- if num_version >= 2.0 and all(v < 2.0 for v in hub_num_versions):
- raise BackwardCompatibilityError(repo_id, version)
-
- logging.warning(
- f"""You are trying to load a dataset from {repo_id} created with a previous version of the
- codebase. The following versions are available: {branches}.
- The requested version ('{version}') is not found. You should be fine since
- backward compatibility is maintained. If you encounter a problem, contact LeRobot maintainers on
- Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.""",
- )
- if "main" not in branches:
- raise ValueError(f"Version 'main' not found on {repo_id}")
- return "main"
- else:
- return version
-
-
-def get_hf_features_from_features(features: dict) -> datasets.Features:
- hf_features = {}
- for key, ft in features.items():
- if ft["dtype"] == "video":
- continue
- elif ft["dtype"] == "image":
- hf_features[key] = datasets.Image()
- elif ft["shape"] == (1,):
- hf_features[key] = datasets.Value(dtype=ft["dtype"])
- else:
- assert len(ft["shape"]) == 1
- hf_features[key] = datasets.Sequence(
- length=ft["shape"][0], feature=datasets.Value(dtype=ft["dtype"])
- )
-
- return datasets.Features(hf_features)
-
-
-def get_features_from_robot(robot: Robot, use_videos: bool = True) -> dict:
- camera_ft = {}
- if robot.cameras:
- camera_ft = {
- key: {"dtype": "video" if use_videos else "image", **ft}
- for key, ft in robot.camera_features.items()
- }
- return {**robot.motor_features, **camera_ft, **DEFAULT_FEATURES}
-
-
-def create_empty_dataset_info(
- codebase_version: str,
- fps: int,
- robot_type: str,
- features: dict,
- use_videos: bool,
-) -> dict:
- return {
- "codebase_version": codebase_version,
- "robot_type": robot_type,
- "total_episodes": 0,
- "total_frames": 0,
- "total_tasks": 0,
- "total_videos": 0,
- "total_chunks": 0,
- "chunks_size": DEFAULT_CHUNK_SIZE,
- "fps": fps,
- "splits": {},
- "data_path": DEFAULT_PARQUET_PATH,
- "video_path": DEFAULT_VIDEO_PATH if use_videos else None,
- "features": features,
- }
-
-
-def get_episode_data_index(
- episode_dicts: list[dict], episodes: list[int] | None = None
-) -> dict[str, torch.Tensor]:
- episode_lengths = {ep_idx: ep_dict["length"] for ep_idx, ep_dict in enumerate(episode_dicts)}
- if episodes is not None:
- episode_lengths = {ep_idx: episode_lengths[ep_idx] for ep_idx in episodes}
-
- cumulative_lenghts = list(accumulate(episode_lengths.values()))
- return {
- "from": torch.LongTensor([0] + cumulative_lenghts[:-1]),
- "to": torch.LongTensor(cumulative_lenghts),
- }
-
-
-def calculate_total_episode(
- hf_dataset: datasets.Dataset, raise_if_not_contiguous: bool = True
-) -> dict[str, torch.Tensor]:
- episode_indices = sorted(hf_dataset.unique("episode_index"))
- total_episodes = len(episode_indices)
- if raise_if_not_contiguous and episode_indices != list(range(total_episodes)):
- raise ValueError("episode_index values are not sorted and contiguous.")
- return total_episodes
-
-
-def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]:
- episode_lengths = []
- table = hf_dataset.data.table
- total_episodes = calculate_total_episode(hf_dataset)
- for ep_idx in range(total_episodes):
- ep_table = table.filter(pc.equal(table["episode_index"], ep_idx))
- episode_lengths.insert(ep_idx, len(ep_table))
-
- cumulative_lenghts = list(accumulate(episode_lengths))
- return {
- "from": torch.LongTensor([0] + cumulative_lenghts[:-1]),
- "to": torch.LongTensor(cumulative_lenghts),
- }
-
-
-def check_timestamps_sync(
- hf_dataset: datasets.Dataset,
- episode_data_index: dict[str, torch.Tensor],
- fps: int,
- tolerance_s: float,
- raise_value_error: bool = True,
-) -> bool:
- """
-    This check makes sure that each timestamp is separated from the next by 1/fps +/- tolerance_s to
-    account for possible numerical error.
- """
- timestamps = torch.stack(hf_dataset["timestamp"])
- diffs = torch.diff(timestamps)
- within_tolerance = torch.abs(diffs - 1 / fps) <= tolerance_s
-
- # We mask differences between the timestamp at the end of an episode
- # and the one at the start of the next episode since these are expected
- # to be outside tolerance.
- mask = torch.ones(len(diffs), dtype=torch.bool)
- ignored_diffs = episode_data_index["to"][:-1] - 1
- mask[ignored_diffs] = False
- filtered_within_tolerance = within_tolerance[mask]
-
- if not torch.all(filtered_within_tolerance):
- # Track original indices before masking
- original_indices = torch.arange(len(diffs))
- filtered_indices = original_indices[mask]
- outside_tolerance_filtered_indices = torch.nonzero(~filtered_within_tolerance) # .squeeze()
- outside_tolerance_indices = filtered_indices[outside_tolerance_filtered_indices]
- episode_indices = torch.stack(hf_dataset["episode_index"])
-
- outside_tolerances = []
- for idx in outside_tolerance_indices:
- entry = {
- "timestamps": [timestamps[idx], timestamps[idx + 1]],
- "diff": diffs[idx],
- "episode_index": episode_indices[idx].item(),
- }
- outside_tolerances.append(entry)
-
- if raise_value_error:
- raise ValueError(
- f"""One or several timestamps unexpectedly violate the tolerance inside episode range.
- This might be due to synchronization issues with timestamps during data collection.
- \n{pformat(outside_tolerances)}"""
- )
- return False
-
- return True
-
-
-def check_delta_timestamps(
- delta_timestamps: dict[str, list[float]], fps: int, tolerance_s: float, raise_value_error: bool = True
-) -> bool:
- """This will check if all the values in delta_timestamps are multiples of 1/fps +/- tolerance.
- This is to ensure that these delta_timestamps added to any timestamp from a dataset will themselves be
- actual timestamps from the dataset.
- """
- outside_tolerance = {}
- for key, delta_ts in delta_timestamps.items():
- within_tolerance = [abs(ts * fps - round(ts * fps)) / fps <= tolerance_s for ts in delta_ts]
- if not all(within_tolerance):
- outside_tolerance[key] = [
- ts for ts, is_within in zip(delta_ts, within_tolerance, strict=True) if not is_within
- ]
-
- if len(outside_tolerance) > 0:
- if raise_value_error:
- raise ValueError(
- f"""
- The following delta_timestamps are found outside of tolerance range.
- Please make sure they are multiples of 1/{fps} +/- tolerance and adjust
- their values accordingly.
- \n{pformat(outside_tolerance)}
- """
- )
- return False
-
- return True
-
-
-def get_delta_indices(delta_timestamps: dict[str, list[float]], fps: int) -> dict[str, list[int]]:
- delta_indices = {}
- for key, delta_ts in delta_timestamps.items():
- delta_indices[key] = (torch.tensor(delta_ts) * fps).long().tolist()
-
- return delta_indices
-
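-
-# A minimal, hypothetical sketch (not part of the original file): at fps=10, deltas must be
-# multiples of 0.1s (within tolerance_s) and map to integer frame offsets.
-def _example_delta_timestamps() -> dict[str, list[int]]:
-    delta_timestamps = {"observation.image": [-0.1, 0.0, 0.1]}
-    check_delta_timestamps(delta_timestamps, fps=10, tolerance_s=1e-4)  # returns True
-    return get_delta_indices(delta_timestamps, fps=10)  # {"observation.image": [-1, 0, 1]}
-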
-
-def cycle(iterable):
- """The equivalent of itertools.cycle, but safe for Pytorch dataloaders.
-
- See https://github.com/pytorch/pytorch/issues/23900 for information on why itertools.cycle is not safe.
- """
- iterator = iter(iterable)
- while True:
- try:
- yield next(iterator)
- except StopIteration:
- iterator = iter(iterable)
-
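-
-# A minimal, hypothetical sketch (not part of the original file): the iterable restarts once
-# exhausted, so five draws from a three-element list wrap around.
-def _example_cycle() -> list[int]:
-    iterator = cycle([1, 2, 3])
-    return [next(iterator) for _ in range(5)]  # [1, 2, 3, 1, 2]
-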
-
-def create_branch(repo_id, *, branch: str, repo_type: str | None = None) -> None:
- """Create a branch on a existing Hugging Face repo. Delete the branch if it already
- exists before creating it.
- """
- api = HfApi()
-
- branches = api.list_repo_refs(repo_id, repo_type=repo_type).branches
- refs = [branch.ref for branch in branches]
- ref = f"refs/heads/{branch}"
- if ref in refs:
- api.delete_branch(repo_id, repo_type=repo_type, branch=branch)
-
- api.create_branch(repo_id, repo_type=repo_type, branch=branch)
-
-
-def create_lerobot_dataset_card(
- tags: list | None = None,
- dataset_info: dict | None = None,
- **kwargs,
-) -> DatasetCard:
- """
- Keyword arguments will be used to replace values in ./lerobot/common/datasets/card_template.md.
- Note: If specified, license must be one of https://huggingface.co/docs/hub/repositories-licenses.
- """
- card_tags = ["LeRobot"]
-
- if tags:
- card_tags += tags
- if dataset_info:
- dataset_structure = "[meta/info.json](meta/info.json):\n"
- dataset_structure += f"```json\n{json.dumps(dataset_info, indent=4)}\n```\n"
- kwargs = {**kwargs, "dataset_structure": dataset_structure}
- card_data = DatasetCardData(
- license=kwargs.get("license"),
- tags=card_tags,
- task_categories=["robotics"],
- configs=[
- {
- "config_name": "default",
- "data_files": "data/*/*.parquet",
- }
- ],
- )
-
- card_template = (importlib.resources.files("lerobot.common.datasets") / "card_template.md").read_text()
-
- return DatasetCard.from_template(
- card_data=card_data,
- template_str=card_template,
- **kwargs,
- )
-
-
-class IterableNamespace(SimpleNamespace):
- """
- A namespace object that supports both dictionary-like iteration and dot notation access.
- Automatically converts nested dictionaries into IterableNamespaces.
-
- This class extends SimpleNamespace to provide:
- - Dictionary-style iteration over keys
- - Access to items via both dot notation (obj.key) and brackets (obj["key"])
- - Dictionary-like methods: items(), keys(), values()
- - Recursive conversion of nested dictionaries
-
- Args:
- dictionary: Optional dictionary to initialize the namespace
- **kwargs: Additional keyword arguments passed to SimpleNamespace
-
- Examples:
- >>> data = {"name": "Alice", "details": {"age": 25}}
- >>> ns = IterableNamespace(data)
- >>> ns.name
- 'Alice'
- >>> ns.details.age
- 25
- >>> list(ns.keys())
- ['name', 'details']
- >>> for key, value in ns.items():
- ... print(f"{key}: {value}")
- name: Alice
- details: IterableNamespace(age=25)
- """
-
- def __init__(self, dictionary: dict[str, Any] = None, **kwargs):
- super().__init__(**kwargs)
- if dictionary is not None:
- for key, value in dictionary.items():
- if isinstance(value, dict):
- setattr(self, key, IterableNamespace(value))
- else:
- setattr(self, key, value)
-
- def __iter__(self) -> Iterator[str]:
- return iter(vars(self))
-
- def __getitem__(self, key: str) -> Any:
- return vars(self)[key]
-
- def items(self):
- return vars(self).items()
-
- def values(self):
- return vars(self).values()
-
- def keys(self):
- return vars(self).keys()
diff --git a/lerobot/common/datasets/video_utils.py b/lerobot/common/datasets/video_utils.py
deleted file mode 100644
index 8ed3318dd2..0000000000
--- a/lerobot/common/datasets/video_utils.py
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import json
-import logging
-import subprocess
-import warnings
-from collections import OrderedDict
-from dataclasses import dataclass, field
-from pathlib import Path
-from typing import Any, ClassVar
-
-import pyarrow as pa
-import torch
-import torchvision
-from datasets.features.features import register_feature
-from PIL import Image
-
-
-def decode_video_frames_torchvision(
- video_path: Path | str,
- timestamps: list[float],
- tolerance_s: float,
- backend: str = "pyav",
- log_loaded_timestamps: bool = False,
-) -> torch.Tensor:
-    """Loads frames associated with the requested timestamps of a video.
-
- The backend can be either "pyav" (default) or "video_reader".
- "video_reader" requires installing torchvision from source, see:
- https://github.com/pytorch/vision/blob/main/torchvision/csrc/io/decoder/gpu/README.rst
- (note that you need to compile against ffmpeg<4.3)
-
- While both use cpu, "video_reader" is supposedly faster than "pyav" but requires additional setup.
- For more info on video decoding, see `benchmark/video/README.md`
-
- See torchvision doc for more info on these two backends:
- https://pytorch.org/vision/0.18/index.html?highlight=backend#torchvision.set_video_backend
-
- Note: Video benefits from inter-frame compression. Instead of storing every frame individually,
- the encoder stores a reference frame (or a key frame) and subsequent frames as differences relative to
- that key frame. As a consequence, to access a requested frame, we need to load the preceding key frame,
- and all subsequent frames until reaching the requested frame. The number of key frames in a video
- can be adjusted during encoding to take into account decoding time and video size in bytes.
- """
- video_path = str(video_path)
-
- # set backend
- keyframes_only = False
- torchvision.set_video_backend(backend)
- if backend == "pyav":
-        keyframes_only = True  # pyav doesn't support accurate seek
-
- # set a video stream reader
- # TODO(rcadene): also load audio stream at the same time
- reader = torchvision.io.VideoReader(video_path, "video")
-
- # set the first and last requested timestamps
- # Note: previous timestamps are usually loaded, since we need to access the previous key frame
- first_ts = timestamps[0]
- last_ts = timestamps[-1]
-
- # access closest key frame of the first requested frame
-    # Note: closest key frame timestamp is usually smaller than `first_ts` (e.g. key frame can be the first frame of the video)
- # for details on what `seek` is doing see: https://pyav.basswood-io.com/docs/stable/api/container.html?highlight=inputcontainer#av.container.InputContainer.seek
- reader.seek(first_ts, keyframes_only=keyframes_only)
-
- # load all frames until last requested frame
- loaded_frames = []
- loaded_ts = []
- for frame in reader:
- current_ts = frame["pts"]
- if log_loaded_timestamps:
- logging.info(f"frame loaded at timestamp={current_ts:.4f}")
- loaded_frames.append(frame["data"])
- loaded_ts.append(current_ts)
- if current_ts >= last_ts:
- break
-
- if backend == "pyav":
- reader.container.close()
-
- reader = None
-
- query_ts = torch.tensor(timestamps)
- loaded_ts = torch.tensor(loaded_ts)
-
- # compute distances between each query timestamp and timestamps of all loaded frames
- dist = torch.cdist(query_ts[:, None], loaded_ts[:, None], p=1)
- min_, argmin_ = dist.min(1)
-
- is_within_tol = min_ < tolerance_s
- assert is_within_tol.all(), (
-        f"One or several query timestamps unexpectedly violate the tolerance ({min_[~is_within_tol]} > {tolerance_s=}). "
-        "It means that the closest frame that can be loaded from the video is too far away in time. "
-        "This might be due to synchronization issues with timestamps during data collection. "
-        "To be safe, we advise ignoring this item during training."
- f"\nqueried timestamps: {query_ts}"
- f"\nloaded timestamps: {loaded_ts}"
- f"\nvideo: {video_path}"
- f"\nbackend: {backend}"
- )
-
- # get closest frames to the query timestamps
- closest_frames = torch.stack([loaded_frames[idx] for idx in argmin_])
- closest_ts = loaded_ts[argmin_]
-
- if log_loaded_timestamps:
- logging.info(f"{closest_ts=}")
-
- # convert to the pytorch format which is float32 in [0,1] range (and channel first)
- closest_frames = closest_frames.type(torch.float32) / 255
-
- assert len(timestamps) == len(closest_frames)
- return closest_frames
-
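-# Example usage (illustrative sketch; the path, timestamps and tolerance are hypothetical values for a
-# 30 fps video):
-#
-#     frames = decode_video_frames_torchvision(
-#         video_path="videos/episode_000000.mp4",
-#         timestamps=[1.0, 1.0 + 1 / 30, 1.0 + 2 / 30],
-#         tolerance_s=1 / 30,
-#     )
-#     # frames: float32 tensor of shape (3, c, h, w) with values in [0, 1]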
-
-def encode_video_frames(
- imgs_dir: Path | str,
- video_path: Path | str,
- fps: int,
- vcodec: str = "libsvtav1",
- pix_fmt: str = "yuv420p",
- g: int | None = 2,
- crf: int | None = 30,
- fast_decode: int = 0,
- log_level: str | None = "error",
- overwrite: bool = False,
-) -> None:
- """More info on ffmpeg arguments tuning on `benchmark/video/README.md`"""
- video_path = Path(video_path)
- video_path.parent.mkdir(parents=True, exist_ok=True)
-
- ffmpeg_args = OrderedDict(
- [
- ("-f", "image2"),
- ("-r", str(fps)),
- ("-i", str(imgs_dir / "frame_%06d.png")),
- ("-vcodec", vcodec),
- ("-pix_fmt", pix_fmt),
- ]
- )
-
- if g is not None:
- ffmpeg_args["-g"] = str(g)
-
- if crf is not None:
- ffmpeg_args["-crf"] = str(crf)
-
- if fast_decode:
- key = "-svtav1-params" if vcodec == "libsvtav1" else "-tune"
- value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode"
- ffmpeg_args[key] = value
-
- if log_level is not None:
- ffmpeg_args["-loglevel"] = str(log_level)
-
- ffmpeg_args = [item for pair in ffmpeg_args.items() for item in pair]
- if overwrite:
- ffmpeg_args.append("-y")
-
- ffmpeg_cmd = ["ffmpeg"] + ffmpeg_args + [str(video_path)]
- # redirect stdin to subprocess.DEVNULL to prevent reading random keyboard inputs from terminal
- subprocess.run(ffmpeg_cmd, check=True, stdin=subprocess.DEVNULL)
-
- if not video_path.exists():
- raise OSError(
- f"Video encoding did not work. File not found: {video_path}. "
-            f"Try running the command manually to debug: `{' '.join(ffmpeg_cmd)}`"
- )
-
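-# Example usage (illustrative sketch; the directories are hypothetical). With the defaults above, the
-# assembled command is roughly:
-#   ffmpeg -f image2 -r 30 -i <imgs_dir>/frame_%06d.png -vcodec libsvtav1 -pix_fmt yuv420p -g 2 -crf 30 -loglevel error <video_path>
-#
-#     encode_video_frames(Path("outputs/frames"), "outputs/episode_000000.mp4", fps=30)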
-
-@dataclass
-class VideoFrame:
- # TODO(rcadene, lhoestq): move to Hugging Face `datasets` repo
- """
- Provides a type for a dataset containing video frames.
-
- Example:
-
- ```python
- data_dict = [{"image": {"path": "videos/episode_0.mp4", "timestamp": 0.3}}]
- features = {"image": VideoFrame()}
- Dataset.from_dict(data_dict, features=Features(features))
- ```
- """
-
- pa_type: ClassVar[Any] = pa.struct({"path": pa.string(), "timestamp": pa.float32()})
- _type: str = field(default="VideoFrame", init=False, repr=False)
-
- def __call__(self):
- return self.pa_type
-
-
-with warnings.catch_warnings():
- warnings.filterwarnings(
- "ignore",
- "'register_feature' is experimental and might be subject to breaking changes in the future.",
- category=UserWarning,
- )
- # to make VideoFrame available in HuggingFace `datasets`
- register_feature(VideoFrame, "VideoFrame")
-
-
-def get_audio_info(video_path: Path | str) -> dict:
- ffprobe_audio_cmd = [
- "ffprobe",
- "-v",
- "error",
- "-select_streams",
- "a:0",
- "-show_entries",
- "stream=channels,codec_name,bit_rate,sample_rate,bit_depth,channel_layout,duration",
- "-of",
- "json",
- str(video_path),
- ]
- result = subprocess.run(ffprobe_audio_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
- if result.returncode != 0:
- raise RuntimeError(f"Error running ffprobe: {result.stderr}")
-
- info = json.loads(result.stdout)
- audio_stream_info = info["streams"][0] if info.get("streams") else None
- if audio_stream_info is None:
- return {"has_audio": False}
-
- # Return the information, defaulting to None if no audio stream is present
- return {
- "has_audio": True,
- "audio.channels": audio_stream_info.get("channels", None),
- "audio.codec": audio_stream_info.get("codec_name", None),
- "audio.bit_rate": int(audio_stream_info["bit_rate"]) if audio_stream_info.get("bit_rate") else None,
- "audio.sample_rate": int(audio_stream_info["sample_rate"])
- if audio_stream_info.get("sample_rate")
- else None,
- "audio.bit_depth": audio_stream_info.get("bit_depth", None),
- "audio.channel_layout": audio_stream_info.get("channel_layout", None),
- }
-
-
-def get_video_info(video_path: Path | str) -> dict:
- ffprobe_video_cmd = [
- "ffprobe",
- "-v",
- "error",
- "-select_streams",
- "v:0",
- "-show_entries",
- "stream=r_frame_rate,width,height,codec_name,nb_frames,duration,pix_fmt",
- "-of",
- "json",
- str(video_path),
- ]
- result = subprocess.run(ffprobe_video_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
- if result.returncode != 0:
- raise RuntimeError(f"Error running ffprobe: {result.stderr}")
-
- info = json.loads(result.stdout)
- video_stream_info = info["streams"][0]
-
- # Calculate fps from r_frame_rate
- r_frame_rate = video_stream_info["r_frame_rate"]
- num, denom = map(int, r_frame_rate.split("/"))
- fps = num / denom
-
- pixel_channels = get_video_pixel_channels(video_stream_info["pix_fmt"])
-
- video_info = {
- "video.fps": fps,
- "video.height": video_stream_info["height"],
- "video.width": video_stream_info["width"],
- "video.channels": pixel_channels,
- "video.codec": video_stream_info["codec_name"],
- "video.pix_fmt": video_stream_info["pix_fmt"],
- "video.is_depth_map": False,
- **get_audio_info(video_path),
- }
-
- return video_info
-
-
-def get_video_pixel_channels(pix_fmt: str) -> int:
- if "gray" in pix_fmt or "depth" in pix_fmt or "monochrome" in pix_fmt:
- return 1
- elif "rgba" in pix_fmt or "yuva" in pix_fmt:
- return 4
- elif "rgb" in pix_fmt or "yuv" in pix_fmt:
- return 3
- else:
-        raise ValueError(f"Unknown pixel format: {pix_fmt}")
-
-
-def get_image_pixel_channels(image: Image.Image) -> int:
- if image.mode == "L":
- return 1 # Grayscale
- elif image.mode == "LA":
- return 2 # Grayscale + Alpha
- elif image.mode == "RGB":
- return 3 # RGB
- elif image.mode == "RGBA":
- return 4 # RGBA
- else:
-        raise ValueError(f"Unknown image mode: {image.mode}")
diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py
deleted file mode 100644
index 54f24ea84b..0000000000
--- a/lerobot/common/envs/factory.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import importlib
-
-import gymnasium as gym
-from omegaconf import DictConfig
-
-
-def make_env(cfg: DictConfig, n_envs: int | None = None) -> gym.vector.VectorEnv | None:
- """Makes a gym vector environment according to the evaluation config.
-
- n_envs can be used to override eval.batch_size in the configuration. Must be at least 1.
- """
- if n_envs is not None and n_envs < 1:
-        raise ValueError("`n_envs` must be at least 1")
-
- if cfg.env.name == "real_world":
- return
-
- package_name = f"gym_{cfg.env.name}"
-
- try:
- importlib.import_module(package_name)
- except ModuleNotFoundError as e:
- print(
- f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.env.name}]'`"
- )
- raise e
-
- gym_handle = f"{package_name}/{cfg.env.task}"
- gym_kwgs = dict(cfg.env.get("gym", {}))
-
- if cfg.env.get("episode_length"):
- gym_kwgs["max_episode_steps"] = cfg.env.episode_length
-
- # batched version of the env that returns an observation of shape (b, c)
- env_cls = gym.vector.AsyncVectorEnv if cfg.eval.use_async_envs else gym.vector.SyncVectorEnv
- env = env_cls(
- [
- lambda: gym.make(gym_handle, disable_env_checker=True, **gym_kwgs)
- for _ in range(n_envs if n_envs is not None else cfg.eval.batch_size)
- ]
- )
-
- return env
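-
-
-# Example usage (illustrative sketch; assumes a Hydra config where `env.name` / `env.task` point to an
-# installed gym package, e.g. gym_pusht, and `eval.batch_size` is set; the exact values are hypothetical):
-#
-#     env = make_env(cfg)            # vectorized env with cfg.eval.batch_size copies of the task
-#     env = make_env(cfg, n_envs=1)  # override the number of environment copies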
diff --git a/lerobot/common/envs/utils.py b/lerobot/common/envs/utils.py
deleted file mode 100644
index 001973bc1b..0000000000
--- a/lerobot/common/envs/utils.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import einops
-import numpy as np
-import torch
-from torch import Tensor
-
-
-def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]:
- """Convert environment observation to LeRobot format observation.
- Args:
-        observations: Dictionary of observation batches from a Gym vector environment.
- Returns:
- Dictionary of observation batches with keys renamed to LeRobot format and values as tensors.
- """
- # map to expected inputs for the policy
- return_observations = {}
- if "pixels" in observations:
- if isinstance(observations["pixels"], dict):
- imgs = {f"observation.images.{key}": img for key, img in observations["pixels"].items()}
- else:
- imgs = {"observation.image": observations["pixels"]}
-
- for imgkey, img in imgs.items():
- img = torch.from_numpy(img)
-
- # sanity check that images are channel last
- _, h, w, c = img.shape
- assert c < h and c < w, f"expect channel last images, but instead got {img.shape=}"
-
- # sanity check that images are uint8
- assert img.dtype == torch.uint8, f"expect torch.uint8, but instead {img.dtype=}"
-
- # convert to channel first of type float32 in range [0,1]
- img = einops.rearrange(img, "b h w c -> b c h w").contiguous()
- img = img.type(torch.float32)
- img /= 255
-
- return_observations[imgkey] = img
-
- if "environment_state" in observations:
- return_observations["observation.environment_state"] = torch.from_numpy(
- observations["environment_state"]
- ).float()
-
- # TODO(rcadene): enable pixels only baseline with `obs_type="pixels"` in environment by removing
- # requirement for "agent_pos"
- return_observations["observation.state"] = torch.from_numpy(observations["agent_pos"]).float()
- return return_observations
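-
-
-# Example usage (illustrative sketch; the shapes are hypothetical for a vectorized env with 2 copies):
-#
-#     observations = {
-#         "pixels": np.zeros((2, 96, 96, 3), dtype=np.uint8),  # channel-last uint8 images
-#         "agent_pos": np.zeros((2, 2)),
-#     }
-#     batch = preprocess_observation(observations)
-#     # batch["observation.image"]: float32 tensor of shape (2, 3, 96, 96) in [0, 1]
-#     # batch["observation.state"]: float32 tensor of shape (2, 2)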
diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py
deleted file mode 100644
index 3bd2df89ab..0000000000
--- a/lerobot/common/logger.py
+++ /dev/null
@@ -1,246 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Borrowed from https://github.com/fyhMer/fowm/blob/main/src/logger.py
-
-# TODO(rcadene, alexander-soare): clean this file
-"""
-
-import logging
-import os
-import re
-from glob import glob
-from pathlib import Path
-
-import torch
-from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE
-from omegaconf import DictConfig, OmegaConf
-from termcolor import colored
-from torch.optim import Optimizer
-from torch.optim.lr_scheduler import LRScheduler
-
-from lerobot.common.policies.policy_protocol import Policy
-from lerobot.common.utils.utils import get_global_random_state, set_global_random_state
-
-
-def log_output_dir(out_dir):
- logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {out_dir}")
-
-
-def cfg_to_group(cfg: DictConfig, return_list: bool = False) -> list[str] | str:
- """Return a group name for logging. Optionally returns group name as list."""
- lst = [
- f"policy:{cfg.policy.name}",
- f"dataset:{cfg.dataset_repo_id}",
- f"env:{cfg.env.name}",
- f"seed:{cfg.seed}",
- ]
- return lst if return_list else "-".join(lst)
-
-
-def get_wandb_run_id_from_filesystem(checkpoint_dir: Path) -> str:
- # Get the WandB run ID.
- paths = glob(str(checkpoint_dir / "../wandb/latest-run/run-*"))
- if len(paths) != 1:
- raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.")
-    match = re.search(r"run-([^.]+)\.wandb", paths[0].split("/")[-1])
- if match is None:
- raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.")
-    wandb_run_id = match.group(1)
- return wandb_run_id
-
-
-class Logger:
- """Primary logger object. Logs either locally or using wandb.
-
- The logger creates the following directory structure:
-
- provided_log_dir
- ├── .hydra # hydra's configuration cache
- ├── checkpoints
- │ ├── specific_checkpoint_name
- │ │ ├── pretrained_model # Hugging Face pretrained model directory
- │ │ │ ├── ...
- │ │ └── training_state.pth # optimizer, scheduler, and random states + training step
-    │   ├── another_specific_checkpoint_name
-    │   │   ├── ...
-    │   ├── ...
-    │   └── last  # a symlink to the last logged checkpoint
- """
-
- pretrained_model_dir_name = "pretrained_model"
- training_state_file_name = "training_state.pth"
-
- def __init__(self, cfg: DictConfig, log_dir: str, wandb_job_name: str | None = None):
- """
- Args:
- log_dir: The directory to save all logs and training outputs to.
-            wandb_job_name: The WandB job name.
- """
- self._cfg = cfg
- self.log_dir = Path(log_dir)
- self.log_dir.mkdir(parents=True, exist_ok=True)
- self.checkpoints_dir = self.get_checkpoints_dir(log_dir)
- self.last_checkpoint_dir = self.get_last_checkpoint_dir(log_dir)
- self.last_pretrained_model_dir = self.get_last_pretrained_model_dir(log_dir)
-
- # Set up WandB.
- self._group = cfg_to_group(cfg)
- project = cfg.get("wandb", {}).get("project")
- entity = cfg.get("wandb", {}).get("entity")
- enable_wandb = cfg.get("wandb", {}).get("enable", False)
- run_offline = not enable_wandb or not project
- if run_offline:
- logging.info(colored("Logs will be saved locally.", "yellow", attrs=["bold"]))
- self._wandb = None
- else:
- os.environ["WANDB_SILENT"] = "true"
- import wandb
-
- wandb_run_id = None
- if cfg.resume:
- wandb_run_id = get_wandb_run_id_from_filesystem(self.checkpoints_dir)
-
- wandb.init(
- id=wandb_run_id,
- project=project,
- entity=entity,
- name=wandb_job_name,
- notes=cfg.get("wandb", {}).get("notes"),
- tags=cfg_to_group(cfg, return_list=True),
- dir=log_dir,
- config=OmegaConf.to_container(cfg, resolve=True),
- # TODO(rcadene): try set to True
- save_code=False,
- # TODO(rcadene): split train and eval, and run async eval with job_type="eval"
- job_type="train_eval",
- resume="must" if cfg.resume else None,
- )
- print(colored("Logs will be synced with wandb.", "blue", attrs=["bold"]))
- logging.info(f"Track this run --> {colored(wandb.run.get_url(), 'yellow', attrs=['bold'])}")
- self._wandb = wandb
-
- @classmethod
- def get_checkpoints_dir(cls, log_dir: str | Path) -> Path:
- """Given the log directory, get the sub-directory in which checkpoints will be saved."""
- return Path(log_dir) / "checkpoints"
-
- @classmethod
- def get_last_checkpoint_dir(cls, log_dir: str | Path) -> Path:
- """Given the log directory, get the sub-directory in which the last checkpoint will be saved."""
- return cls.get_checkpoints_dir(log_dir) / "last"
-
- @classmethod
- def get_last_pretrained_model_dir(cls, log_dir: str | Path) -> Path:
- """
- Given the log directory, get the sub-directory in which the last checkpoint's pretrained weights will
- be saved.
- """
- return cls.get_last_checkpoint_dir(log_dir) / cls.pretrained_model_dir_name
-
- def save_model(self, save_dir: Path, policy: Policy, wandb_artifact_name: str | None = None):
- """Save the weights of the Policy model using PyTorchModelHubMixin.
-
- The weights are saved in a folder called "pretrained_model" under the checkpoint directory.
-
- Optionally also upload the model to WandB.
- """
- self.checkpoints_dir.mkdir(parents=True, exist_ok=True)
- policy.save_pretrained(save_dir)
- # Also save the full Hydra config for the env configuration.
- OmegaConf.save(self._cfg, save_dir / "config.yaml")
- if self._wandb and not self._cfg.wandb.disable_artifact:
- # note wandb artifact does not accept ":" or "/" in its name
- artifact = self._wandb.Artifact(wandb_artifact_name, type="model")
- artifact.add_file(save_dir / SAFETENSORS_SINGLE_FILE)
- self._wandb.log_artifact(artifact)
- if self.last_checkpoint_dir.exists():
- os.remove(self.last_checkpoint_dir)
-
- def save_training_state(
- self,
- save_dir: Path,
- train_step: int,
- optimizer: Optimizer,
- scheduler: LRScheduler | None,
- ):
- """Checkpoint the global training_step, optimizer state, scheduler state, and random state.
-
- All of these are saved as "training_state.pth" under the checkpoint directory.
- """
- training_state = {
- "step": train_step,
- "optimizer": optimizer.state_dict(),
- **get_global_random_state(),
- }
- if scheduler is not None:
- training_state["scheduler"] = scheduler.state_dict()
- torch.save(training_state, save_dir / self.training_state_file_name)
-
- def save_checkpoint(
- self,
- train_step: int,
- policy: Policy,
- optimizer: Optimizer,
- scheduler: LRScheduler | None,
- identifier: str,
- ):
- """Checkpoint the model weights and the training state."""
- checkpoint_dir = self.checkpoints_dir / str(identifier)
- wandb_artifact_name = (
- None
- if self._wandb is None
- else f"{self._group.replace(':', '_').replace('/', '_')}-{self._cfg.seed}-{identifier}"
- )
- self.save_model(
- checkpoint_dir / self.pretrained_model_dir_name, policy, wandb_artifact_name=wandb_artifact_name
- )
- self.save_training_state(checkpoint_dir, train_step, optimizer, scheduler)
- os.symlink(checkpoint_dir.absolute(), self.last_checkpoint_dir)
-
- def load_last_training_state(self, optimizer: Optimizer, scheduler: LRScheduler | None) -> int:
- """
- Given the last checkpoint in the logging directory, load the optimizer state, scheduler state, and
- random state, and return the global training step.
- """
- training_state = torch.load(self.last_checkpoint_dir / self.training_state_file_name)
- optimizer.load_state_dict(training_state["optimizer"])
- if scheduler is not None:
- scheduler.load_state_dict(training_state["scheduler"])
- elif "scheduler" in training_state:
- raise ValueError(
- "The checkpoint contains a scheduler state_dict, but no LRScheduler was provided."
- )
- # Small hack to get the expected keys: use `get_global_random_state`.
- set_global_random_state({k: training_state[k] for k in get_global_random_state()})
- return training_state["step"]
-
- def log_dict(self, d, step, mode="train"):
- assert mode in {"train", "eval"}
- # TODO(alexander-soare): Add local text log.
- if self._wandb is not None:
- for k, v in d.items():
- if not isinstance(v, (int, float, str)):
- logging.warning(
- f'WandB logging of key "{k}" was ignored as its type is not handled by this wrapper.'
- )
- continue
- self._wandb.log({f"{mode}/{k}": v}, step=step)
-
- def log_video(self, video_path: str, step: int, mode: str = "train"):
- assert mode in {"train", "eval"}
- assert self._wandb is not None
- wandb_video = self._wandb.Video(video_path, fps=self._cfg.fps, format="mp4")
- self._wandb.log({f"{mode}/video": wandb_video}, step=step)
diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py
deleted file mode 100644
index 5cb2fd5269..0000000000
--- a/lerobot/common/policies/factory.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import inspect
-import logging
-
-from omegaconf import DictConfig, OmegaConf
-
-from lerobot.common.policies.policy_protocol import Policy
-from lerobot.common.utils.utils import get_safe_torch_device
-
-
-def _policy_cfg_from_hydra_cfg(policy_cfg_class, hydra_cfg):
- expected_kwargs = set(inspect.signature(policy_cfg_class).parameters)
- if not set(hydra_cfg.policy).issuperset(expected_kwargs):
- logging.warning(
- f"Hydra config is missing arguments: {set(expected_kwargs).difference(hydra_cfg.policy)}"
- )
-
- # OmegaConf.to_container returns lists where sequences are found, but our dataclasses use tuples to avoid
- # issues with mutable defaults. This filter changes all lists to tuples.
- def list_to_tuple(item):
- return tuple(item) if isinstance(item, list) else item
-
- policy_cfg = policy_cfg_class(
- **{
- k: list_to_tuple(v)
- for k, v in OmegaConf.to_container(hydra_cfg.policy, resolve=True).items()
- if k in expected_kwargs
- }
- )
- return policy_cfg
-
-
-def get_policy_and_config_classes(name: str) -> tuple[Policy, object]:
- """Get the policy's class and config class given a name (matching the policy class' `name` attribute)."""
- if name == "tdmpc":
- from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig
- from lerobot.common.policies.tdmpc.modeling_tdmpc import TDMPCPolicy
-
- return TDMPCPolicy, TDMPCConfig
- elif name == "diffusion":
- from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
- from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
-
- return DiffusionPolicy, DiffusionConfig
- elif name == "act":
- from lerobot.common.policies.act.configuration_act import ACTConfig
- from lerobot.common.policies.act.modeling_act import ACTPolicy
-
- return ACTPolicy, ACTConfig
- elif name == "vqbet":
- from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig
- from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTPolicy
-
- return VQBeTPolicy, VQBeTConfig
- else:
- raise NotImplementedError(f"Policy with name {name} is not implemented.")
-
-
-def make_policy(
- hydra_cfg: DictConfig, pretrained_policy_name_or_path: str | None = None, dataset_stats=None
-) -> Policy:
- """Make an instance of a policy class.
-
- Args:
- hydra_cfg: A parsed Hydra configuration (see scripts). If `pretrained_policy_name_or_path` is
- provided, only `hydra_cfg.policy.name` is used while everything else is ignored.
- pretrained_policy_name_or_path: Either the repo ID of a model hosted on the Hub or a path to a
- directory containing weights saved using `Policy.save_pretrained`. Note that providing this
- argument overrides everything in `hydra_cfg.policy` apart from `hydra_cfg.policy.name`.
- dataset_stats: Dataset statistics to use for (un)normalization of inputs/outputs in the policy. Must
- be provided when initializing a new policy, and must not be provided when loading a pretrained
- policy. Therefore, this argument is mutually exclusive with `pretrained_policy_name_or_path`.
- """
- if not (pretrained_policy_name_or_path is None) ^ (dataset_stats is None):
- raise ValueError(
- "Exactly one of `pretrained_policy_name_or_path` and `dataset_stats` must be provided."
- )
-
- policy_cls, policy_cfg_class = get_policy_and_config_classes(hydra_cfg.policy.name)
-
- policy_cfg = _policy_cfg_from_hydra_cfg(policy_cfg_class, hydra_cfg)
- if pretrained_policy_name_or_path is None:
- # Make a fresh policy.
- policy = policy_cls(policy_cfg, dataset_stats)
- else:
- # Load a pretrained policy and override the config if needed (for example, if there are inference-time
- # hyperparameters that we want to vary).
-        # TODO(alexander-soare): This hack makes use of huggingface_hub's tooling to load the policy with
-        # pretrained weights, which are then loaded into a fresh policy with the desired config. This PR in
- # huggingface_hub should make it possible to avoid the hack:
- # https://github.com/huggingface/huggingface_hub/pull/2274.
- policy = policy_cls(policy_cfg)
- policy.load_state_dict(policy_cls.from_pretrained(pretrained_policy_name_or_path).state_dict())
-
- policy.to(get_safe_torch_device(hydra_cfg.device))
-
- return policy
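-
-
-# Example usage (illustrative sketch; the dataset object and pretrained repo id are hypothetical):
-#
-#     policy = make_policy(hydra_cfg, dataset_stats=dataset.stats)  # fresh policy trained from scratch
-#     policy = make_policy(hydra_cfg, pretrained_policy_name_or_path="lerobot/act_aloha_sim")  # load weights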
diff --git a/lerobot/common/policies/normalize.py b/lerobot/common/policies/normalize.py
deleted file mode 100644
index f2e1179c0e..0000000000
--- a/lerobot/common/policies/normalize.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import torch
-from torch import Tensor, nn
-
-
-def create_stats_buffers(
- shapes: dict[str, list[int]],
- modes: dict[str, str],
- stats: dict[str, dict[str, Tensor]] | None = None,
-) -> dict[str, dict[str, nn.ParameterDict]]:
- """
- Create buffers per modality (e.g. "observation.image", "action") containing their mean, std, min, max
- statistics.
-
- Args: (see Normalize and Unnormalize)
-
- Returns:
- dict: A dictionary where keys are modalities and values are `nn.ParameterDict` containing
-            `nn.Parameters` set to `requires_grad=False`, so they are not updated during backpropagation.
- """
- stats_buffers = {}
-
- for key, mode in modes.items():
- assert mode in ["mean_std", "min_max"]
-
- shape = tuple(shapes[key])
-
- if "image" in key:
- # sanity checks
-            assert len(shape) == 3, f"number of dimensions of {key} != 3 ({shape=})"
- c, h, w = shape
- assert c < h and c < w, f"{key} is not channel first ({shape=})"
- # override image shape to be invariant to height and width
- shape = (c, 1, 1)
-
- # Note: we initialize mean, std, min, max to infinity. They should be overwritten
- # downstream by `stats` or `policy.load_state_dict`, as expected. During forward,
- # we assert they are not infinity anymore.
-
- buffer = {}
- if mode == "mean_std":
- mean = torch.ones(shape, dtype=torch.float32) * torch.inf
- std = torch.ones(shape, dtype=torch.float32) * torch.inf
- buffer = nn.ParameterDict(
- {
- "mean": nn.Parameter(mean, requires_grad=False),
- "std": nn.Parameter(std, requires_grad=False),
- }
- )
- elif mode == "min_max":
- min = torch.ones(shape, dtype=torch.float32) * torch.inf
- max = torch.ones(shape, dtype=torch.float32) * torch.inf
- buffer = nn.ParameterDict(
- {
- "min": nn.Parameter(min, requires_grad=False),
- "max": nn.Parameter(max, requires_grad=False),
- }
- )
-
- if stats is not None:
- # Note: The clone is needed to make sure that the logic in save_pretrained doesn't see duplicated
- # tensors anywhere (for example, when we use the same stats for normalization and
- # unnormalization). See the logic here
- # https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L97.
- if mode == "mean_std":
- buffer["mean"].data = stats[key]["mean"].clone()
- buffer["std"].data = stats[key]["std"].clone()
- elif mode == "min_max":
- buffer["min"].data = stats[key]["min"].clone()
- buffer["max"].data = stats[key]["max"].clone()
-
- stats_buffers[key] = buffer
- return stats_buffers
-
-
-def _no_stats_error_str(name: str) -> str:
- return (
- f"`{name}` is infinity. You should either initialize with `stats` as an argument, or use a "
- "pretrained model."
- )
-
-
-class Normalize(nn.Module):
- """Normalizes data (e.g. "observation.image") for more stable and faster convergence during training."""
-
- def __init__(
- self,
- shapes: dict[str, list[int]],
- modes: dict[str, str],
- stats: dict[str, dict[str, Tensor]] | None = None,
- ):
- """
- Args:
- shapes (dict): A dictionary where keys are input modalities (e.g. "observation.image") and values
-                are their shapes (e.g. `[3, 96, 96]`). These shapes are used to create the tensor buffer containing
- mean, std, min, max statistics. If the provided `shapes` contain keys related to images, the shape
- is adjusted to be invariant to height and width, assuming a channel-first (c, h, w) format.
- modes (dict): A dictionary where keys are output modalities (e.g. "observation.image") and values
- are their normalization modes among:
- - "mean_std": subtract the mean and divide by standard deviation.
- - "min_max": map to [-1, 1] range.
- stats (dict, optional): A dictionary where keys are output modalities (e.g. "observation.image")
- and values are dictionaries of statistic types and their values (e.g.
-                `{"mean": torch.randn(3,1,1), "std": torch.randn(3,1,1)}`). If provided, as expected for
-                training the model for the first time, these statistics will overwrite the default buffers. If
-                not provided, as expected for finetuning or evaluation, the default buffers should be
- overwritten by a call to `policy.load_state_dict(state_dict)`. That way, initializing the
- dataset is not needed to get the stats, since they are already in the policy state_dict.
- """
- super().__init__()
- self.shapes = shapes
- self.modes = modes
- self.stats = stats
- stats_buffers = create_stats_buffers(shapes, modes, stats)
- for key, buffer in stats_buffers.items():
- setattr(self, "buffer_" + key.replace(".", "_"), buffer)
-
- # TODO(rcadene): should we remove torch.no_grad?
- @torch.no_grad
- def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
- batch = dict(batch) # shallow copy avoids mutating the input batch
- for key, mode in self.modes.items():
- buffer = getattr(self, "buffer_" + key.replace(".", "_"))
-
- if mode == "mean_std":
- mean = buffer["mean"]
- std = buffer["std"]
- assert not torch.isinf(mean).any(), _no_stats_error_str("mean")
- assert not torch.isinf(std).any(), _no_stats_error_str("std")
- batch[key] = (batch[key] - mean) / (std + 1e-8)
- elif mode == "min_max":
- min = buffer["min"]
- max = buffer["max"]
- assert not torch.isinf(min).any(), _no_stats_error_str("min")
- assert not torch.isinf(max).any(), _no_stats_error_str("max")
- # normalize to [0,1]
- batch[key] = (batch[key] - min) / (max - min + 1e-8)
- # normalize to [-1, 1]
- batch[key] = batch[key] * 2 - 1
- else:
- raise ValueError(mode)
- return batch
-
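-# Example usage (illustrative sketch; the key, shape and statistics are hypothetical):
-#
-#     normalize = Normalize(
-#         shapes={"observation.state": [2]},
-#         modes={"observation.state": "mean_std"},
-#         stats={"observation.state": {"mean": torch.zeros(2), "std": torch.ones(2)}},
-#     )
-#     batch = normalize({"observation.state": torch.randn(8, 2)})
-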
-
-class Unnormalize(nn.Module):
- """
-    Similar to `Normalize`, but unnormalizes output data (e.g. `{"action": torch.randn(b,c)}`) back to the
-    original range used by the environment.
- """
-
- def __init__(
- self,
- shapes: dict[str, list[int]],
- modes: dict[str, str],
- stats: dict[str, dict[str, Tensor]] | None = None,
- ):
- """
- Args:
- shapes (dict): A dictionary where keys are input modalities (e.g. "observation.image") and values
-                are their shapes (e.g. `[3, 96, 96]`). These shapes are used to create the tensor buffer containing
- mean, std, min, max statistics. If the provided `shapes` contain keys related to images, the shape
- is adjusted to be invariant to height and width, assuming a channel-first (c, h, w) format.
- modes (dict): A dictionary where keys are output modalities (e.g. "observation.image") and values
- are their normalization modes among:
- - "mean_std": subtract the mean and divide by standard deviation.
- - "min_max": map to [-1, 1] range.
- stats (dict, optional): A dictionary where keys are output modalities (e.g. "observation.image")
- and values are dictionaries of statistic types and their values (e.g.
-                `{"mean": torch.randn(3,1,1), "std": torch.randn(3,1,1)}`). If provided, as expected for
-                training the model for the first time, these statistics will overwrite the default buffers. If
-                not provided, as expected for finetuning or evaluation, the default buffers should be
- overwritten by a call to `policy.load_state_dict(state_dict)`. That way, initializing the
- dataset is not needed to get the stats, since they are already in the policy state_dict.
- """
- super().__init__()
- self.shapes = shapes
- self.modes = modes
- self.stats = stats
- # `self.buffer_observation_state["mean"]` contains `torch.tensor(state_dim)`
- stats_buffers = create_stats_buffers(shapes, modes, stats)
- for key, buffer in stats_buffers.items():
- setattr(self, "buffer_" + key.replace(".", "_"), buffer)
-
- # TODO(rcadene): should we remove torch.no_grad?
- @torch.no_grad
- def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
- batch = dict(batch) # shallow copy avoids mutating the input batch
- for key, mode in self.modes.items():
- buffer = getattr(self, "buffer_" + key.replace(".", "_"))
-
- if mode == "mean_std":
- mean = buffer["mean"]
- std = buffer["std"]
- assert not torch.isinf(mean).any(), _no_stats_error_str("mean")
- assert not torch.isinf(std).any(), _no_stats_error_str("std")
- batch[key] = batch[key] * std + mean
- elif mode == "min_max":
- min = buffer["min"]
- max = buffer["max"]
- assert not torch.isinf(min).any(), _no_stats_error_str("min")
- assert not torch.isinf(max).any(), _no_stats_error_str("max")
- batch[key] = (batch[key] + 1) / 2
- batch[key] = batch[key] * (max - min) + min
- else:
- raise ValueError(mode)
- return batch
diff --git a/lerobot/common/policies/policy_protocol.py b/lerobot/common/policies/policy_protocol.py
deleted file mode 100644
index 4e9e87afd4..0000000000
--- a/lerobot/common/policies/policy_protocol.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""A protocol that all policies should follow.
-
-This provides a mechanism for type-hinting and isinstance checks without requiring the policy classes to
-subclass a base class.
-
-The protocol structure, method signatures, and docstrings should be used by developers as a reference for
-how to implement new policies.
-"""
-
-from typing import Protocol, runtime_checkable
-
-from torch import Tensor
-
-
-@runtime_checkable
-class Policy(Protocol):
- """The required interface for implementing a policy.
-
- We also expect all policies to subclass torch.nn.Module and PyTorchModelHubMixin.
- """
-
- name: str
-
- def __init__(self, cfg, dataset_stats: dict[str, dict[str, Tensor]] | None = None):
- """
- Args:
- cfg: Policy configuration class instance or None, in which case the default instantiation of the
- configuration class is used.
- dataset_stats: Dataset statistics to be used for normalization.
- """
-
- def reset(self):
- """To be called whenever the environment is reset.
-
- Does things like clearing caches.
- """
-
- def forward(self, batch: dict[str, Tensor]) -> dict:
- """Run the batch through the model and compute the loss for training or validation.
-
- Returns a dictionary with "loss" and potentially other information. Apart from "loss" which is a Tensor, all
- other items should be logging-friendly, native Python types.
- """
-
- def select_action(self, batch: dict[str, Tensor]) -> Tensor:
- """Return one action to run in the environment (potentially in batch mode).
-
- When the model uses a history of observations, or outputs a sequence of actions, this method deals
- with caching.
- """
-
-
-@runtime_checkable
-class PolicyWithUpdate(Policy, Protocol):
- def update(self):
- """An update method that is to be called after a training optimization step.
-
-        Implements any additional updates the model parameters may need (for example, doing an EMA step for a
- target model, or incrementing an internal buffer).
- """
diff --git a/lerobot/common/policies/utils.py b/lerobot/common/policies/utils.py
deleted file mode 100644
index 5a62daa2a7..0000000000
--- a/lerobot/common/policies/utils.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import torch
-from torch import nn
-
-
-def populate_queues(queues, batch):
- for key in batch:
- # Ignore keys not in the queues already (leaving the responsibility to the caller to make sure the
- # queues have the keys they want).
- if key not in queues:
- continue
- if len(queues[key]) != queues[key].maxlen:
- # initialize by copying the first observation several times until the queue is full
- while len(queues[key]) != queues[key].maxlen:
- queues[key].append(batch[key])
- else:
- # add latest observation to the queue
- queues[key].append(batch[key])
- return queues
-
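-# Example usage (illustrative sketch; the key and maxlen are hypothetical):
-#
-#     from collections import deque
-#     queues = {"observation.state": deque(maxlen=2)}
-#     queues = populate_queues(queues, batch)  # first call copies the observation until the queue is full
-#     queues = populate_queues(queues, batch)  # later calls simply append the latest observation
-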
-
-def get_device_from_parameters(module: nn.Module) -> torch.device:
- """Get a module's device by checking one of its parameters.
-
- Note: assumes that all parameters have the same device
- """
- return next(iter(module.parameters())).device
-
-
-def get_dtype_from_parameters(module: nn.Module) -> torch.dtype:
- """Get a module's parameter dtype by checking one of its parameters.
-
- Note: assumes that all parameters have the same dtype.
- """
- return next(iter(module.parameters())).dtype
diff --git a/lerobot/common/policies/vqbet/configuration_vqbet.py b/lerobot/common/policies/vqbet/configuration_vqbet.py
deleted file mode 100644
index dfe4684d26..0000000000
--- a/lerobot/common/policies/vqbet/configuration_vqbet.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 Seungjae Lee and Yibin Wang and Haritheja Etukuru
-# and H. Jin Kim and Nur Muhammad Mahi Shafiullah and Lerrel Pinto
-# and The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from dataclasses import dataclass, field
-
-
-@dataclass
-class VQBeTConfig:
- """Configuration class for VQ-BeT.
-
- Defaults are configured for training with PushT providing proprioceptive and single camera observations.
-
- The parameters you will most likely need to change are the ones which depend on the environment / sensors.
- Those are: `input_shapes` and `output_shapes`.
-
- Notes on the inputs and outputs:
- - "observation.state" is required as an input key.
-    - At least one key starting with "observation.image" is required as an input.
- - If there are multiple keys beginning with "observation.image" they are treated as multiple camera
- views. Right now we only support all images having the same shape.
- - "action" is required as an output key.
-
- Args:
- n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
- current step and additional steps going back).
- n_action_pred_token: Total number of current token and future tokens that VQ-BeT predicts.
- action_chunk_size: Action chunk size of each action prediction token.
- input_shapes: A dictionary defining the shapes of the input data for the policy.
- The key represents the input data name, and the value is a list indicating the dimensions
- of the corresponding data. For example, "observation.image" refers to an input from
- a camera with dimensions [3, 96, 96], indicating it has three color channels and 96x96 resolution.
-            Importantly, shapes don't include the batch dimension or temporal dimension.
- output_shapes: A dictionary defining the shapes of the output data for the policy.
- The key represents the output data name, and the value is a list indicating the dimensions
- of the corresponding data. For example, "action" refers to an output shape of [14], indicating
-            14-dimensional actions. Importantly, shapes don't include the batch dimension or temporal dimension.
- input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
- and the value specifies the normalization mode to apply. The two available modes are "mean_std"
-            which subtracts the mean and divides by the standard deviation, and "min_max" which rescales to a
- [-1, 1] range.
-        output_normalization_modes: Similar dictionary as `input_normalization_modes`, but used to unnormalize to the
- original scale. Note that this is also used for normalizing the training targets.
- vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
- crop_shape: (H, W) shape to crop images to as a preprocessing step for the vision backbone. Must fit
- within the image size. If None, no cropping is done.
- crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
- mode).
-        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
- `None` means no pretrained weights.
- use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
- The group sizes are set to be about 16 (to be precise, feature_dim // 16).
- spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax.
- n_vqvae_training_steps: Number of optimization steps for training Residual VQ.
- vqvae_n_embed: Number of embedding vectors in the RVQ dictionary (each layer).
- vqvae_embedding_dim: Dimension of each embedding vector in the RVQ dictionary.
-        vqvae_enc_hidden_dim: Size of hidden dimensions of the Encoder / Decoder part of the Residual VQ-VAE.
- gpt_block_size: Max block size of minGPT (should be larger than the number of input tokens)
-        gpt_input_dim: Size of the input dimension of GPT. This is also used as the dimension of observation features.
-        gpt_output_dim: Size of the output dimension of GPT. This is also used as an input dimension of the offset / bin prediction heads.
- gpt_n_layer: Number of layers of GPT
-        gpt_n_head: Number of attention heads of GPT.
- gpt_hidden_dim: Size of hidden dimensions of GPT
- dropout: Dropout rate for GPT
-        mlp_hidden_dim: Size of hidden dimensions of the offset / bin prediction heads of VQ-BeT.
- offset_loss_weight: A constant that is multiplied to the offset loss
- primary_code_loss_weight: A constant that is multiplied to the primary code prediction loss
- secondary_code_loss_weight: A constant that is multiplied to the secondary code prediction loss
- bet_softmax_temperature: Sampling temperature of code for rollout with VQ-BeT
-        sequentially_select: Whether to select the primary / secondary codes sequentially (pick the primary
-            code, then select the secondary code), or both at the same time.
- """
-
- # Inputs / output structure.
- n_obs_steps: int = 5
- n_action_pred_token: int = 3
- action_chunk_size: int = 5
-
- input_shapes: dict[str, list[int]] = field(
- default_factory=lambda: {
- "observation.image": [3, 96, 96],
- "observation.state": [2],
- }
- )
- output_shapes: dict[str, list[int]] = field(
- default_factory=lambda: {
- "action": [2],
- }
- )
-
- # Normalization / Unnormalization
- input_normalization_modes: dict[str, str] = field(
- default_factory=lambda: {
- "observation.image": "mean_std",
- "observation.state": "min_max",
- }
- )
- output_normalization_modes: dict[str, str] = field(default_factory=lambda: {"action": "min_max"})
-
- # Architecture / modeling.
- # Vision backbone.
- vision_backbone: str = "resnet18"
- crop_shape: tuple[int, int] | None = (84, 84)
- crop_is_random: bool = True
- pretrained_backbone_weights: str | None = None
- use_group_norm: bool = True
- spatial_softmax_num_keypoints: int = 32
- # VQ-VAE
- n_vqvae_training_steps: int = 20000
- vqvae_n_embed: int = 16
- vqvae_embedding_dim: int = 256
- vqvae_enc_hidden_dim: int = 128
- # VQ-BeT
- gpt_block_size: int = 500
- gpt_input_dim: int = 512
- gpt_output_dim: int = 512
- gpt_n_layer: int = 8
- gpt_n_head: int = 8
- gpt_hidden_dim: int = 512
- dropout: float = 0.1
- mlp_hidden_dim: int = 1024
- offset_loss_weight: float = 10000.0
- primary_code_loss_weight: float = 5.0
- secondary_code_loss_weight: float = 0.5
- bet_softmax_temperature: float = 0.1
- sequentially_select: bool = False
-
- def __post_init__(self):
- """Input validation (not exhaustive)."""
- if not self.vision_backbone.startswith("resnet"):
- raise ValueError(
- f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
- )
- image_keys = {k for k in self.input_shapes if k.startswith("observation.image")}
- if self.crop_shape is not None:
- for image_key in image_keys:
- if (
- self.crop_shape[0] > self.input_shapes[image_key][1]
- or self.crop_shape[1] > self.input_shapes[image_key][2]
- ):
- raise ValueError(
- f"`crop_shape` should fit within `input_shapes[{image_key}]`. Got {self.crop_shape} "
- f"for `crop_shape` and {self.input_shapes[image_key]} for "
-                        f"`input_shapes[{image_key}]`."
- )
- # Check that all input images have the same shape.
- first_image_key = next(iter(image_keys))
- for image_key in image_keys:
- if self.input_shapes[image_key] != self.input_shapes[first_image_key]:
- raise ValueError(
- f"`input_shapes[{image_key}]` does not match `input_shapes[{first_image_key}]`, but we "
- "expect all image shapes to match."
- )
diff --git a/lerobot/common/robot_devices/cameras/intelrealsense.py b/lerobot/common/robot_devices/cameras/intelrealsense.py
deleted file mode 100644
index 84ac540f2c..0000000000
--- a/lerobot/common/robot_devices/cameras/intelrealsense.py
+++ /dev/null
@@ -1,561 +0,0 @@
-"""
-This file contains utilities for recording frames from Intel RealSense cameras.
-"""
-
-import argparse
-import concurrent.futures
-import logging
-import math
-import shutil
-import threading
-import time
-import traceback
-from collections import Counter
-from dataclasses import dataclass, replace
-from pathlib import Path
-from threading import Thread
-
-import numpy as np
-from PIL import Image
-
-from lerobot.common.robot_devices.utils import (
- RobotDeviceAlreadyConnectedError,
- RobotDeviceNotConnectedError,
- busy_wait,
-)
-from lerobot.common.utils.utils import capture_timestamp_utc
-
-SERIAL_NUMBER_INDEX = 1
-
-
-def find_cameras(raise_when_empty=True, mock=False) -> list[dict]:
- """
- Find the names and the serial numbers of the Intel RealSense cameras
- connected to the computer.
- """
- if mock:
- import tests.mock_pyrealsense2 as rs
- else:
- import pyrealsense2 as rs
-
- cameras = []
- for device in rs.context().query_devices():
- serial_number = int(device.get_info(rs.camera_info(SERIAL_NUMBER_INDEX)))
- name = device.get_info(rs.camera_info.name)
- cameras.append(
- {
- "serial_number": serial_number,
- "name": name,
- }
- )
-
- if raise_when_empty and len(cameras) == 0:
- raise OSError(
- "Not a single camera was detected. Try re-plugging, or re-installing `librealsense` and its python wrapper `pyrealsense2`, or updating the firmware."
- )
-
- return cameras
-
-
-def save_image(img_array, serial_number, frame_index, images_dir):
- try:
- img = Image.fromarray(img_array)
- path = images_dir / f"camera_{serial_number}_frame_{frame_index:06d}.png"
- path.parent.mkdir(parents=True, exist_ok=True)
- img.save(str(path), quality=100)
- logging.info(f"Saved image: {path}")
- except Exception as e:
- logging.error(f"Failed to save image for camera {serial_number} frame {frame_index}: {e}")
-
-
-def save_images_from_cameras(
- images_dir: Path,
- serial_numbers: list[int] | None = None,
- fps=None,
- width=None,
- height=None,
- record_time_s=2,
- mock=False,
-):
- """
- Initializes all the cameras and saves images to the directory. Useful to visually identify the camera
-    associated with a given serial number.
- """
- if serial_numbers is None or len(serial_numbers) == 0:
- camera_infos = find_cameras(mock=mock)
- serial_numbers = [cam["serial_number"] for cam in camera_infos]
-
- if mock:
- import tests.mock_cv2 as cv2
- else:
- import cv2
-
- print("Connecting cameras")
- cameras = []
- for cam_sn in serial_numbers:
- print(f"{cam_sn=}")
- camera = IntelRealSenseCamera(cam_sn, fps=fps, width=width, height=height, mock=mock)
- camera.connect()
- print(
- f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})"
- )
- cameras.append(camera)
-
- images_dir = Path(images_dir)
- if images_dir.exists():
- shutil.rmtree(
- images_dir,
- )
- images_dir.mkdir(parents=True, exist_ok=True)
-
- print(f"Saving images to {images_dir}")
- frame_index = 0
- start_time = time.perf_counter()
- try:
- with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
- while True:
- now = time.perf_counter()
-
- for camera in cameras:
- # If we use async_read when fps is None, the loop will go full speed, and we will end up
- # saving the same images from the cameras multiple times until the RAM/disk is full.
- image = camera.read() if fps is None else camera.async_read()
- if image is None:
- print("No Frame")
-
- bgr_converted_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-
- executor.submit(
- save_image,
- bgr_converted_image,
- camera.serial_number,
- frame_index,
- images_dir,
- )
-
- if fps is not None:
- dt_s = time.perf_counter() - now
- busy_wait(1 / fps - dt_s)
-
- if time.perf_counter() - start_time > record_time_s:
- break
-
- print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}")
-
- frame_index += 1
- finally:
- print(f"Images have been saved to {images_dir}")
- for camera in cameras:
- camera.disconnect()
-
-
-@dataclass
-class IntelRealSenseCameraConfig:
- """
-    Example of tested options for Intel RealSense D405:
-
- ```python
- IntelRealSenseCameraConfig(30, 640, 480)
- IntelRealSenseCameraConfig(60, 640, 480)
- IntelRealSenseCameraConfig(90, 640, 480)
- IntelRealSenseCameraConfig(30, 1280, 720)
- IntelRealSenseCameraConfig(30, 640, 480, use_depth=True)
- IntelRealSenseCameraConfig(30, 640, 480, rotation=90)
- ```
- """
-
- fps: int | None = None
- width: int | None = None
- height: int | None = None
- color_mode: str = "rgb"
- channels: int | None = None
- use_depth: bool = False
- force_hardware_reset: bool = True
- rotation: int | None = None
- mock: bool = False
-
- def __post_init__(self):
- if self.color_mode not in ["rgb", "bgr"]:
- raise ValueError(
- f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided."
- )
-
- self.channels = 3
-
- at_least_one_is_not_none = self.fps is not None or self.width is not None or self.height is not None
- at_least_one_is_none = self.fps is None or self.width is None or self.height is None
- if at_least_one_is_not_none and at_least_one_is_none:
- raise ValueError(
- "For `fps`, `width` and `height`, either all of them need to be set, or none of them, "
- f"but {self.fps=}, {self.width=}, {self.height=} were provided."
- )
-
- if self.rotation not in [-90, None, 90, 180]:
- raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})")
-
-
-class IntelRealSenseCamera:
- """
-    The IntelRealSenseCamera class is similar to the OpenCVCamera class, but adds features specific to Intel RealSense cameras:
-    - it is instantiated with the camera's serial number, which won't change at random, as can be the case with OpenCVCamera indices on Linux,
- - can also be instantiated with the camera's name — if it's unique — using IntelRealSenseCamera.init_from_name(),
- - depth map can be returned.
-
-    To find the serial numbers of your cameras, you can run our utility script that will save a few frames from each camera:
- ```bash
- python lerobot/common/robot_devices/cameras/intelrealsense.py --images-dir outputs/images_from_intelrealsense_cameras
- ```
-
- When an IntelRealSenseCamera is instantiated, if no specific config is provided, the default fps, width, height and color_mode
- of the given camera will be used.
-
- Example of usage:
- ```python
- # Instantiate with its serial number
- camera = IntelRealSenseCamera(128422271347)
- # Or by its name if it's unique
- camera = IntelRealSenseCamera.init_from_name("Intel RealSense D405")
- camera.connect()
- color_image = camera.read()
- # when done using the camera, consider disconnecting
- camera.disconnect()
- ```
-
- Example of changing default fps, width, height and color_mode:
- ```python
- camera = IntelRealSenseCamera(serial_number, fps=30, width=1280, height=720)
- camera.connect()  # applies the settings, might error out if these settings are not compatible with the camera
-
- camera = IntelRealSenseCamera(serial_number, fps=90, width=640, height=480)
- camera.connect()
-
- camera = IntelRealSenseCamera(serial_number, fps=90, width=640, height=480, color_mode="bgr")
- camera.connect()
- ```
-
- Example of returning depth:
- ```python
- camera = IntelRealSenseCamera(serial_number, use_depth=True)
- camera.connect()
- color_image, depth_map = camera.read()
- ```
- """
-
- def __init__(
- self,
- serial_number: int,
- config: IntelRealSenseCameraConfig | None = None,
- **kwargs,
- ):
- if config is None:
- config = IntelRealSenseCameraConfig()
-
- # Overwrite the config arguments using kwargs
- config = replace(config, **kwargs)
-
- self.serial_number = serial_number
- self.fps = config.fps
- self.width = config.width
- self.height = config.height
- self.channels = config.channels
- self.color_mode = config.color_mode
- self.use_depth = config.use_depth
- self.force_hardware_reset = config.force_hardware_reset
- self.mock = config.mock
-
- self.camera = None
- self.is_connected = False
- self.thread = None
- self.stop_event = None
- self.color_image = None
- self.depth_map = None
- self.logs = {}
-
- if self.mock:
- import tests.mock_cv2 as cv2
- else:
- import cv2
-
- # TODO(aliberts): Do we keep original width/height or do we define them after rotation?
- self.rotation = None
- if config.rotation == -90:
- self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE
- elif config.rotation == 90:
- self.rotation = cv2.ROTATE_90_CLOCKWISE
- elif config.rotation == 180:
- self.rotation = cv2.ROTATE_180
-
- @classmethod
- def init_from_name(cls, name: str, config: IntelRealSenseCameraConfig | None = None, **kwargs):
- camera_infos = find_cameras()
- camera_names = [cam["name"] for cam in camera_infos]
- this_name_count = Counter(camera_names)[name]
- if this_name_count > 1:
- # TODO(aliberts): Test this with multiple identical cameras (Aloha)
- raise ValueError(
- f"Multiple {name} cameras have been detected. Please use their serial number to instantiate them."
- )
-
- name_to_serial_dict = {cam["name"]: cam["serial_number"] for cam in camera_infos}
- cam_sn = name_to_serial_dict[name]
-
- if config is None:
- config = IntelRealSenseCameraConfig()
-
- # Overwrite the config arguments using kwargs
- config = replace(config, **kwargs)
-
- return cls(serial_number=cam_sn, config=config, **kwargs)
-
- def connect(self):
- if self.is_connected:
- raise RobotDeviceAlreadyConnectedError(
- f"IntelRealSenseCamera({self.serial_number}) is already connected."
- )
-
- if self.mock:
- import tests.mock_pyrealsense2 as rs
- else:
- import pyrealsense2 as rs
-
- config = rs.config()
- config.enable_device(str(self.serial_number))
-
- if self.fps and self.width and self.height:
- # TODO(rcadene): can we set rgb8 directly?
- config.enable_stream(rs.stream.color, self.width, self.height, rs.format.rgb8, self.fps)
- else:
- config.enable_stream(rs.stream.color)
-
- if self.use_depth:
- if self.fps and self.width and self.height:
- config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.fps)
- else:
- config.enable_stream(rs.stream.depth)
-
- self.camera = rs.pipeline()
- try:
- profile = self.camera.start(config)
- is_camera_open = True
- except RuntimeError:
- is_camera_open = False
- traceback.print_exc()
-
- # If the camera doesn't work, display the camera indices corresponding to
- # valid cameras.
- if not is_camera_open:
- # Verify that the provided `serial_number` is valid before printing the traceback
- camera_infos = find_cameras()
- serial_numbers = [cam["serial_number"] for cam in camera_infos]
- if self.serial_number not in serial_numbers:
- raise ValueError(
- f"`serial_number` is expected to be one of these available cameras {serial_numbers}, but {self.serial_number} is provided instead. "
- "To find the serial number you should use, run `python lerobot/common/robot_devices/cameras/intelrealsense.py`."
- )
-
- raise OSError(f"Can't access IntelRealSenseCamera({self.serial_number}).")
-
- color_stream = profile.get_stream(rs.stream.color)
- color_profile = color_stream.as_video_stream_profile()
- actual_fps = color_profile.fps()
- actual_width = color_profile.width()
- actual_height = color_profile.height()
-
- # Using `math.isclose` since actual fps can be a float (e.g. 29.9 instead of 30)
- if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3):
- # Using `OSError` since it's a broad error that encompasses issues related to device communication
- raise OSError(
- f"Can't set {self.fps=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_fps}."
- )
- if self.width is not None and self.width != actual_width:
- raise OSError(
- f"Can't set {self.width=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_width}."
- )
- if self.height is not None and self.height != actual_height:
- raise OSError(
- f"Can't set {self.height=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_height}."
- )
-
- self.fps = round(actual_fps)
- self.width = round(actual_width)
- self.height = round(actual_height)
-
- self.is_connected = True
-
- def read(self, temporary_color: str | None = None) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
- """Read a frame from the camera returned in the format height x width x channels (e.g. 480 x 640 x 3)
- of type `np.uint8`, in contrast to the pytorch format which is float channel-first.
-
- When `use_depth=True`, returns a tuple `(color_image, depth_map)` with a depth map in the format
- height x width (e.g. 480 x 640) of type np.uint16.
-
- Note: Reading a frame is blocking and happens at most `camera.fps` times per second.
- If you are reading data from other sensors, we advise using `camera.async_read()`, which is the non-blocking version of `camera.read()`.
- """
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
- )
-
- if self.mock:
- import tests.mock_cv2 as cv2
- else:
- import cv2
-
- start_time = time.perf_counter()
-
- frame = self.camera.wait_for_frames(timeout_ms=5000)
-
- color_frame = frame.get_color_frame()
-
- if not color_frame:
- raise OSError(f"Can't capture color image from IntelRealSenseCamera({self.serial_number}).")
-
- color_image = np.asanyarray(color_frame.get_data())
-
- requested_color_mode = self.color_mode if temporary_color is None else temporary_color
- if requested_color_mode not in ["rgb", "bgr"]:
- raise ValueError(
- f"Expected color values are 'rgb' or 'bgr', but {requested_color_mode} is provided."
- )
-
- # IntelRealSense uses RGB format as default (red, green, blue).
- if requested_color_mode == "bgr":
- color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
-
- h, w, _ = color_image.shape
- if h != self.height or w != self.width:
- raise OSError(
- f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
- )
-
- if self.rotation is not None:
- color_image = cv2.rotate(color_image, self.rotation)
-
- # log the number of seconds it took to read the image
- self.logs["delta_timestamp_s"] = time.perf_counter() - start_time
-
- # log the utc time at which the image was received
- self.logs["timestamp_utc"] = capture_timestamp_utc()
-
- if self.use_depth:
- depth_frame = frame.get_depth_frame()
- if not depth_frame:
- raise OSError(f"Can't capture depth image from IntelRealSenseCamera({self.serial_number}).")
-
- depth_map = np.asanyarray(depth_frame.get_data())
-
- h, w = depth_map.shape
- if h != self.height or w != self.width:
- raise OSError(
- f"Can't capture depth map with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
- )
-
- if self.rotation is not None:
- depth_map = cv2.rotate(depth_map, self.rotation)
-
- return color_image, depth_map
- else:
- return color_image
-
- def read_loop(self):
- while not self.stop_event.is_set():
- if self.use_depth:
- self.color_image, self.depth_map = self.read()
- else:
- self.color_image = self.read()
-
- def async_read(self):
- """Access the latest color image"""
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
- )
-
- if self.thread is None:
- self.stop_event = threading.Event()
- self.thread = Thread(target=self.read_loop, args=())
- self.thread.daemon = True
- self.thread.start()
-
- num_tries = 0
- while self.color_image is None:
- # TODO(rcadene, aliberts): intelrealsense has diverged compared to opencv over here
- num_tries += 1
- time.sleep(1 / self.fps)
- if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()):
- raise Exception(
- "The thread responsible for `self.async_read()` took too much time to start. There might be an issue. Verify that `self.thread.start()` has been called."
- )
-
- if self.use_depth:
- return self.color_image, self.depth_map
- else:
- return self.color_image
-
- def disconnect(self):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
- )
-
- if self.thread is not None and self.thread.is_alive():
- # wait for the thread to finish
- self.stop_event.set()
- self.thread.join()
- self.thread = None
- self.stop_event = None
-
- self.camera.stop()
- self.camera = None
-
- self.is_connected = False
-
- def __del__(self):
- if getattr(self, "is_connected", False):
- self.disconnect()
-
-
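-# A minimal sketch (not part of the original module) of a fixed-rate acquisition loop combining
-# `async_read()` with `busy_wait`, mirroring what `save_images_from_cameras` above does. The serial
-# number, fps and resolution below are illustrative placeholders.
-def _example_async_loop(serial_number=128422271347, fps=30, duration_s=1.0):
-    camera = IntelRealSenseCamera(serial_number, fps=fps, width=640, height=480)
-    camera.connect()
-    start = time.perf_counter()
-    try:
-        while time.perf_counter() - start < duration_s:
-            t0 = time.perf_counter()
-            color_image = camera.async_read()  # non-blocking: returns the latest frame from the background thread
-            print(color_image.shape)
-            busy_wait(1 / fps - (time.perf_counter() - t0))
-    finally:
-        camera.disconnect()
-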
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description="Save a few frames using `IntelRealSenseCamera` for all cameras connected to the computer, or a selected subset."
- )
- parser.add_argument(
- "--serial-numbers",
- type=int,
- nargs="*",
- default=None,
- help="List of serial numbers used to instantiate the `IntelRealSenseCamera`. If not provided, find and use all available cameras.",
- )
- parser.add_argument(
- "--fps",
- type=int,
- default=30,
- help="Set the number of frames recorded per second for all cameras. If not provided, use the default fps of each camera.",
- )
- parser.add_argument(
- "--width",
- type=int,
- default=640,
- help="Set the width for all cameras. If not provided, use the default width of each camera.",
- )
- parser.add_argument(
- "--height",
- type=int,
- default=480,
- help="Set the height for all cameras. If not provided, use the default height of each camera.",
- )
- parser.add_argument(
- "--images-dir",
- type=Path,
- default="outputs/images_from_intelrealsense_cameras",
- help="Set directory to save a few frames for each camera.",
- )
- parser.add_argument(
- "--record-time-s",
- type=float,
- default=2.0,
- help="Set the number of seconds used to record the frames. By default, 2 seconds.",
- )
- args = parser.parse_args()
- save_images_from_cameras(**vars(args))
diff --git a/lerobot/common/robot_devices/cameras/opencv.py b/lerobot/common/robot_devices/cameras/opencv.py
deleted file mode 100644
index d284cf55a9..0000000000
--- a/lerobot/common/robot_devices/cameras/opencv.py
+++ /dev/null
@@ -1,520 +0,0 @@
-"""
-This file contains utilities for recording frames from cameras. For more info look at `OpenCVCamera` docstring.
-"""
-
-import argparse
-import concurrent.futures
-import math
-import platform
-import shutil
-import threading
-import time
-from dataclasses import dataclass, replace
-from pathlib import Path
-from threading import Thread
-
-import numpy as np
-from PIL import Image
-
-from lerobot.common.robot_devices.utils import (
- RobotDeviceAlreadyConnectedError,
- RobotDeviceNotConnectedError,
- busy_wait,
-)
-from lerobot.common.utils.utils import capture_timestamp_utc
-
-# The maximum opencv device index depends on your operating system. For instance,
-# if you have 3 cameras, they should be associated with indices 0, 1, and 2. This is the case
-# on MacOS. However, on Ubuntu, the indices can be different, e.g. 6, 16, and 23.
-# When you change the USB port or reboot the computer, the operating system might
-# treat the same cameras as new devices. Thus we select a higher bound to search indices.
-MAX_OPENCV_INDEX = 60
-
-
-def find_cameras(raise_when_empty=False, max_index_search_range=MAX_OPENCV_INDEX, mock=False) -> list[dict]:
- cameras = []
- if platform.system() == "Linux":
- print("Linux detected. Finding available camera indices through scanning '/dev/video*' ports")
- possible_ports = [str(port) for port in Path("/dev").glob("video*")]
- ports = _find_cameras(possible_ports, raise_when_empty=raise_when_empty, mock=mock)
- for port in ports:
- cameras.append(
- {
- "port": port,
- "index": int(port.removeprefix("/dev/video")),
- }
- )
- else:
- print(
- "Mac or Windows detected. Finding available camera indices through "
- f"scanning all indices from 0 to {max_index_search_range}"
- )
- possible_indices = range(max_index_search_range)
- indices = _find_cameras(possible_indices, raise_when_empty=raise_when_empty, mock=mock)
- for index in indices:
- cameras.append(
- {
- "port": None,
- "index": index,
- }
- )
-
- return cameras
-
-
-def _find_cameras(
- possible_camera_ids: list[int | str], raise_when_empty=False, mock=False
-) -> list[int | str]:
- if mock:
- import tests.mock_cv2 as cv2
- else:
- import cv2
-
- camera_ids = []
- for camera_idx in possible_camera_ids:
- camera = cv2.VideoCapture(camera_idx)
- is_open = camera.isOpened()
- camera.release()
-
- if is_open:
- print(f"Camera found at index {camera_idx}")
- camera_ids.append(camera_idx)
-
- if raise_when_empty and len(camera_ids) == 0:
- raise OSError(
- "Not a single camera was detected. Try re-plugging, or re-installing `opencv-python`, "
- "or your camera driver, or make sure your camera is compatible with OpenCV."
- )
-
- return camera_ids
-
-
-def is_valid_unix_path(path: str) -> bool:
- """Note: if 'path' points to a symlink, this will return True only if the target exists"""
- p = Path(path)
- return p.is_absolute() and p.exists()
-
-
-def get_camera_index_from_unix_port(port: Path) -> int:
- return int(str(port.resolve()).removeprefix("/dev/video"))
-
-
-def save_image(img_array, camera_index, frame_index, images_dir):
- img = Image.fromarray(img_array)
- path = images_dir / f"camera_{camera_index:02d}_frame_{frame_index:06d}.png"
- path.parent.mkdir(parents=True, exist_ok=True)
- img.save(str(path), quality=100)
-
-
-def save_images_from_cameras(
- images_dir: Path,
- camera_ids: list | None = None,
- fps=None,
- width=None,
- height=None,
- record_time_s=2,
- mock=False,
-):
- """
- Initializes all the cameras and saves images to the directory. Useful to visually identify the camera
- associated to a given camera index.
- """
- if camera_ids is None or len(camera_ids) == 0:
- camera_infos = find_cameras(mock=mock)
- camera_ids = [cam["index"] for cam in camera_infos]
-
- print("Connecting cameras")
- cameras = []
- for cam_idx in camera_ids:
- camera = OpenCVCamera(cam_idx, fps=fps, width=width, height=height, mock=mock)
- camera.connect()
- print(
- f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, "
- f"height={camera.height}, color_mode={camera.color_mode})"
- )
- cameras.append(camera)
-
- images_dir = Path(images_dir)
- if images_dir.exists():
- shutil.rmtree(
- images_dir,
- )
- images_dir.mkdir(parents=True, exist_ok=True)
-
- print(f"Saving images to {images_dir}")
- frame_index = 0
- start_time = time.perf_counter()
- with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
- while True:
- now = time.perf_counter()
-
- for camera in cameras:
- # If we use async_read when fps is None, the loop will go full speed, and we will end up
- # saving the same images from the cameras multiple times until the RAM/disk is full.
- image = camera.read() if fps is None else camera.async_read()
-
- executor.submit(
- save_image,
- image,
- camera.camera_index,
- frame_index,
- images_dir,
- )
-
- if fps is not None:
- dt_s = time.perf_counter() - now
- busy_wait(1 / fps - dt_s)
-
- print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}")
-
- if time.perf_counter() - start_time > record_time_s:
- break
-
- frame_index += 1
-
- print(f"Images have been saved to {images_dir}")
-
-
-@dataclass
-class OpenCVCameraConfig:
- """
- Example of tested options:
-
- ```python
- OpenCVCameraConfig(30, 640, 480)
- OpenCVCameraConfig(60, 640, 480)
- OpenCVCameraConfig(90, 640, 480)
- OpenCVCameraConfig(30, 1280, 720)
- ```
- """
-
- fps: int | None = None
- width: int | None = None
- height: int | None = None
- color_mode: str = "rgb"
- channels: int | None = None
- rotation: int | None = None
- mock: bool = False
-
- def __post_init__(self):
- if self.color_mode not in ["rgb", "bgr"]:
- raise ValueError(
- f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided."
- )
-
- self.channels = 3
-
- if self.rotation not in [-90, None, 90, 180]:
- raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})")
-
-
-class OpenCVCamera:
- """
- The OpenCVCamera class allows recording images from cameras efficiently. It relies on OpenCV to communicate
- with the cameras. Most cameras are compatible. For more info, see the [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
-
- An OpenCVCamera instance requires a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera,
- like a laptop's built-in webcam, the camera index is expected to be 0, but it might also be very different, and it
- might change if you reboot your computer or re-plug your camera. This behavior depends on your operating system.
-
- To find the camera indices of your cameras, you can run our utility script that will save a few frames for each camera:
- ```bash
- python lerobot/common/robot_devices/cameras/opencv.py --images-dir outputs/images_from_opencv_cameras
- ```
-
- When an OpenCVCamera is instantiated, if no specific config is provided, the default fps, width, height and color_mode
- of the given camera will be used.
-
- Example of usage:
- ```python
- camera = OpenCVCamera(camera_index=0)
- camera.connect()
- color_image = camera.read()
- # when done using the camera, consider disconnecting
- camera.disconnect()
- ```
-
- Example of changing default fps, width, height and color_mode:
- ```python
- camera = OpenCVCamera(0, fps=30, width=1280, height=720)
- camera.connect()  # applies the settings, might error out if these settings are not compatible with the camera
-
- camera = OpenCVCamera(0, fps=90, width=640, height=480)
- camera.connect()
-
- camera = OpenCVCamera(0, fps=90, width=640, height=480, color_mode="bgr")
- camera.connect()
- ```
- """
-
- def __init__(self, camera_index: int | str, config: OpenCVCameraConfig | None = None, **kwargs):
- if config is None:
- config = OpenCVCameraConfig()
-
- # Overwrite config arguments using kwargs
- config = replace(config, **kwargs)
-
- self.camera_index = camera_index
- self.port = None
-
- # Linux uses ports for connecting to cameras
- if platform.system() == "Linux":
- if isinstance(self.camera_index, int):
- self.port = Path(f"/dev/video{self.camera_index}")
- elif isinstance(self.camera_index, str) and is_valid_unix_path(self.camera_index):
- self.port = Path(self.camera_index)
- # Retrieve the camera index from a potentially symlinked path
- self.camera_index = get_camera_index_from_unix_port(self.port)
- else:
- raise ValueError(f"Please check the provided camera_index: {camera_index}")
-
- self.fps = config.fps
- self.width = config.width
- self.height = config.height
- self.channels = config.channels
- self.color_mode = config.color_mode
- self.mock = config.mock
-
- self.camera = None
- self.is_connected = False
- self.thread = None
- self.stop_event = None
- self.color_image = None
- self.logs = {}
-
- if self.mock:
- import tests.mock_cv2 as cv2
- else:
- import cv2
-
- # TODO(aliberts): Do we keep original width/height or do we define them after rotation?
- self.rotation = None
- if config.rotation == -90:
- self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE
- elif config.rotation == 90:
- self.rotation = cv2.ROTATE_90_CLOCKWISE
- elif config.rotation == 180:
- self.rotation = cv2.ROTATE_180
-
- def connect(self):
- if self.is_connected:
- raise RobotDeviceAlreadyConnectedError(f"OpenCVCamera({self.camera_index}) is already connected.")
-
- if self.mock:
- import tests.mock_cv2 as cv2
- else:
- import cv2
-
- # Use 1 thread to avoid blocking the main thread. Especially useful during data collection
- # when other threads are used to save the images.
- cv2.setNumThreads(1)
-
- camera_idx = f"/dev/video{self.camera_index}" if platform.system() == "Linux" else self.camera_index
- # First create a temporary camera trying to access `camera_index`,
- # and verify it is a valid camera by calling `isOpened`.
- tmp_camera = cv2.VideoCapture(camera_idx)
- is_camera_open = tmp_camera.isOpened()
- # Release camera to make it accessible for `find_cameras`
- tmp_camera.release()
- del tmp_camera
-
- # If the camera doesn't work, display the camera indices corresponding to
- # valid cameras.
- if not is_camera_open:
- # Verify that the provided `camera_index` is valid before printing the traceback
- cameras_info = find_cameras()
- available_cam_ids = [cam["index"] for cam in cameras_info]
- if self.camera_index not in available_cam_ids:
- raise ValueError(
- f"`camera_index` is expected to be one of these available cameras {available_cam_ids}, but {self.camera_index} is provided instead. "
- "To find the camera index you should use, run `python lerobot/common/robot_devices/cameras/opencv.py`."
- )
-
- raise OSError(f"Can't access OpenCVCamera({camera_idx}).")
-
- # Secondly, create the camera that will be used downstream.
- # Note: For some unknown reason, calling `isOpened` blocks the camera which then
- # needs to be re-created.
- self.camera = cv2.VideoCapture(camera_idx)
-
- if self.fps is not None:
- self.camera.set(cv2.CAP_PROP_FPS, self.fps)
- if self.width is not None:
- self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
- if self.height is not None:
- self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
-
- actual_fps = self.camera.get(cv2.CAP_PROP_FPS)
- actual_width = self.camera.get(cv2.CAP_PROP_FRAME_WIDTH)
- actual_height = self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT)
-
- # Using `math.isclose` since actual fps can be a float (e.g. 29.9 instead of 30)
- if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3):
- # Using `OSError` since it's a broad error that encompasses issues related to device communication
- raise OSError(
- f"Can't set {self.fps=} for OpenCVCamera({self.camera_index}). Actual value is {actual_fps}."
- )
- if self.width is not None and not math.isclose(self.width, actual_width, rel_tol=1e-3):
- raise OSError(
- f"Can't set {self.width=} for OpenCVCamera({self.camera_index}). Actual value is {actual_width}."
- )
- if self.height is not None and not math.isclose(self.height, actual_height, rel_tol=1e-3):
- raise OSError(
- f"Can't set {self.height=} for OpenCVCamera({self.camera_index}). Actual value is {actual_height}."
- )
-
- self.fps = round(actual_fps)
- self.width = round(actual_width)
- self.height = round(actual_height)
-
- self.is_connected = True
-
- def read(self, temporary_color_mode: str | None = None) -> np.ndarray:
- """Read a frame from the camera returned in the format (height, width, channels)
- (e.g. 480 x 640 x 3), in contrast to the pytorch format which is channel-first.
-
- Note: Reading a frame is blocking and happens at most `camera.fps` times per second.
- If you are reading data from other sensors, we advise using `camera.async_read()`, which is the non-blocking version of `camera.read()`.
- """
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
- )
-
- start_time = time.perf_counter()
-
- ret, color_image = self.camera.read()
-
- if not ret:
- raise OSError(f"Can't capture color image from camera {self.camera_index}.")
-
- requested_color_mode = self.color_mode if temporary_color_mode is None else temporary_color_mode
-
- if requested_color_mode not in ["rgb", "bgr"]:
- raise ValueError(
- f"Expected color values are 'rgb' or 'bgr', but {requested_color_mode} is provided."
- )
-
- if self.mock:
- import tests.mock_cv2 as cv2
- else:
- import cv2
-
- # OpenCV uses BGR format as default (blue, green, red) for all operations, including displaying images.
- # However, deep learning pipelines such as LeRobot use the RGB format as default to train neural networks,
- # so we convert the image color from BGR to RGB.
- if requested_color_mode == "rgb":
- color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
-
- h, w, _ = color_image.shape
- if h != self.height or w != self.width:
- raise OSError(
- f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
- )
-
- if self.rotation is not None:
- color_image = cv2.rotate(color_image, self.rotation)
-
- # log the number of seconds it took to read the image
- self.logs["delta_timestamp_s"] = time.perf_counter() - start_time
-
- # log the utc time at which the image was received
- self.logs["timestamp_utc"] = capture_timestamp_utc()
-
- self.color_image = color_image
-
- return color_image
-
- def read_loop(self):
- while not self.stop_event.is_set():
- try:
- self.color_image = self.read()
- except Exception as e:
- print(f"Error reading in thread: {e}")
-
- def async_read(self):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
- )
-
- if self.thread is None:
- self.stop_event = threading.Event()
- self.thread = Thread(target=self.read_loop, args=())
- self.thread.daemon = True
- self.thread.start()
-
- num_tries = 0
- while True:
- if self.color_image is not None:
- return self.color_image
-
- time.sleep(1 / self.fps)
- num_tries += 1
- if num_tries > self.fps * 2:
- raise TimeoutError("Timed out waiting for async_read() to start.")
-
- def disconnect(self):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
- )
-
- if self.thread is not None:
- self.stop_event.set()
- self.thread.join() # wait for the thread to finish
- self.thread = None
- self.stop_event = None
-
- self.camera.release()
- self.camera = None
- self.is_connected = False
-
- def __del__(self):
- if getattr(self, "is_connected", False):
- self.disconnect()
-
-
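-# A minimal sketch (not part of the original module) showing the `temporary_color_mode` argument:
-# a camera configured for RGB (the default used for training) can still return a one-off BGR frame,
-# which is what OpenCV's own I/O functions expect. Camera index 0 and the output filename are placeholders.
-def _example_save_one_frame(camera_index=0):
-    import cv2
-
-    camera = OpenCVCamera(camera_index)
-    camera.connect()
-    try:
-        bgr_frame = camera.read(temporary_color_mode="bgr")  # one-off BGR frame
-        cv2.imwrite("example_frame.png", bgr_frame)  # cv2 expects BGR, so no conversion needed
-    finally:
-        camera.disconnect()
-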
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description="Save a few frames using `OpenCVCamera` for all cameras connected to the computer, or a selected subset."
- )
- parser.add_argument(
- "--camera-ids",
- type=int,
- nargs="*",
- default=None,
- help="List of camera indices used to instantiate the `OpenCVCamera`. If not provided, find and use all available camera indices.",
- )
- parser.add_argument(
- "--fps",
- type=int,
- default=None,
- help="Set the number of frames recorded per second for all cameras. If not provided, use the default fps of each camera.",
- )
- parser.add_argument(
- "--width",
- type=int,
- default=None,
- help="Set the width for all cameras. If not provided, use the default width of each camera.",
- )
- parser.add_argument(
- "--height",
- type=int,
- default=None,
- help="Set the height for all cameras. If not provided, use the default height of each camera.",
- )
- parser.add_argument(
- "--images-dir",
- type=Path,
- default="outputs/images_from_opencv_cameras",
- help="Set directory to save a few frames for each camera.",
- )
- parser.add_argument(
- "--record-time-s",
- type=float,
- default=4.0,
- help="Set the number of seconds used to record the frames. By default, 4 seconds.",
- )
- args = parser.parse_args()
- save_images_from_cameras(**vars(args))
diff --git a/lerobot/common/robot_devices/cameras/utils.py b/lerobot/common/robot_devices/cameras/utils.py
deleted file mode 100644
index 7904a57a5c..0000000000
--- a/lerobot/common/robot_devices/cameras/utils.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from typing import Protocol
-
-import numpy as np
-
-
-# Defines a camera type
-class Camera(Protocol):
- def connect(self): ...
- def read(self, temporary_color: str | None = None) -> np.ndarray: ...
- def async_read(self) -> np.ndarray: ...
- def disconnect(self): ...
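-
-# A minimal sketch (not part of the original module) of how this Protocol is meant to be used:
-# any object with these four methods (e.g. OpenCVCamera or IntelRealSenseCamera) satisfies the
-# `Camera` type through structural typing, without inheriting from it.
-def grab_frames(cameras: list[Camera], num_frames: int = 3) -> list[np.ndarray]:
-    frames = []
-    for camera in cameras:
-        camera.connect()
-        for _ in range(num_frames):
-            frames.append(camera.read())
-        camera.disconnect()
-    return frames
-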
diff --git a/lerobot/common/robot_devices/motors/dynamixel.py b/lerobot/common/robot_devices/motors/dynamixel.py
deleted file mode 100644
index 1e1396f766..0000000000
--- a/lerobot/common/robot_devices/motors/dynamixel.py
+++ /dev/null
@@ -1,867 +0,0 @@
-import enum
-import logging
-import math
-import time
-import traceback
-from copy import deepcopy
-
-import numpy as np
-import tqdm
-
-from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
-from lerobot.common.utils.utils import capture_timestamp_utc
-
-PROTOCOL_VERSION = 2.0
-BAUDRATE = 1_000_000
-TIMEOUT_MS = 1000
-
-MAX_ID_RANGE = 252
-
-# The following bounds define the lower and upper joints range (after calibration).
-# For joints in degree (i.e. revolute joints), their nominal range is [-180, 180] degrees
-# which corresponds to a half rotation on the left and half rotation on the right.
-# Some joints might require higher range, so we allow up to [-270, 270] degrees until
-# an error is raised.
-LOWER_BOUND_DEGREE = -270
-UPPER_BOUND_DEGREE = 270
-# For joints in percentage (i.e. joints that move linearly like the prismatic joint of a gripper),
-# their nominal range is [0, 100] %. For instance, for Aloha gripper, 0% is fully
- closed, and 100% is fully open. To account for slight calibration issues, we allow up to
- [-10, 110] % until an error is raised.
-LOWER_BOUND_LINEAR = -10
-UPPER_BOUND_LINEAR = 110
-
-HALF_TURN_DEGREE = 180
-
-# https://emanual.robotis.com/docs/en/dxl/x/xl330-m077
-# https://emanual.robotis.com/docs/en/dxl/x/xl330-m288
-# https://emanual.robotis.com/docs/en/dxl/x/xl430-w250
-# https://emanual.robotis.com/docs/en/dxl/x/xm430-w350
-# https://emanual.robotis.com/docs/en/dxl/x/xm540-w270
-# https://emanual.robotis.com/docs/en/dxl/x/xc430-w150
-
-# data_name: (address, size_byte)
-X_SERIES_CONTROL_TABLE = {
- "Model_Number": (0, 2),
- "Model_Information": (2, 4),
- "Firmware_Version": (6, 1),
- "ID": (7, 1),
- "Baud_Rate": (8, 1),
- "Return_Delay_Time": (9, 1),
- "Drive_Mode": (10, 1),
- "Operating_Mode": (11, 1),
- "Secondary_ID": (12, 1),
- "Protocol_Type": (13, 1),
- "Homing_Offset": (20, 4),
- "Moving_Threshold": (24, 4),
- "Temperature_Limit": (31, 1),
- "Max_Voltage_Limit": (32, 2),
- "Min_Voltage_Limit": (34, 2),
- "PWM_Limit": (36, 2),
- "Current_Limit": (38, 2),
- "Acceleration_Limit": (40, 4),
- "Velocity_Limit": (44, 4),
- "Max_Position_Limit": (48, 4),
- "Min_Position_Limit": (52, 4),
- "Shutdown": (63, 1),
- "Torque_Enable": (64, 1),
- "LED": (65, 1),
- "Status_Return_Level": (68, 1),
- "Registered_Instruction": (69, 1),
- "Hardware_Error_Status": (70, 1),
- "Velocity_I_Gain": (76, 2),
- "Velocity_P_Gain": (78, 2),
- "Position_D_Gain": (80, 2),
- "Position_I_Gain": (82, 2),
- "Position_P_Gain": (84, 2),
- "Feedforward_2nd_Gain": (88, 2),
- "Feedforward_1st_Gain": (90, 2),
- "Bus_Watchdog": (98, 1),
- "Goal_PWM": (100, 2),
- "Goal_Current": (102, 2),
- "Goal_Velocity": (104, 4),
- "Profile_Acceleration": (108, 4),
- "Profile_Velocity": (112, 4),
- "Goal_Position": (116, 4),
- "Realtime_Tick": (120, 2),
- "Moving": (122, 1),
- "Moving_Status": (123, 1),
- "Present_PWM": (124, 2),
- "Present_Current": (126, 2),
- "Present_Velocity": (128, 4),
- "Present_Position": (132, 4),
- "Velocity_Trajectory": (136, 4),
- "Position_Trajectory": (140, 4),
- "Present_Input_Voltage": (144, 2),
- "Present_Temperature": (146, 1),
-}
-
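-# Each control-table entry maps a register name to its (address, size_in_bytes) pair on the motor.
-# Illustrative lookups (not part of the original module), using the table above:
-# addr, size = X_SERIES_CONTROL_TABLE["Goal_Position"]   # -> (116, 4): 4-byte register at address 116
-# addr, size = X_SERIES_CONTROL_TABLE["Torque_Enable"]   # -> (64, 1): 1-byte register at address 64
-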
-X_SERIES_BAUDRATE_TABLE = {
- 0: 9_600,
- 1: 57_600,
- 2: 115_200,
- 3: 1_000_000,
- 4: 2_000_000,
- 5: 3_000_000,
- 6: 4_000_000,
-}
-
-CALIBRATION_REQUIRED = ["Goal_Position", "Present_Position"]
-CONVERT_UINT32_TO_INT32_REQUIRED = ["Goal_Position", "Present_Position"]
-
-MODEL_CONTROL_TABLE = {
- "x_series": X_SERIES_CONTROL_TABLE,
- "xl330-m077": X_SERIES_CONTROL_TABLE,
- "xl330-m288": X_SERIES_CONTROL_TABLE,
- "xl430-w250": X_SERIES_CONTROL_TABLE,
- "xm430-w350": X_SERIES_CONTROL_TABLE,
- "xm540-w270": X_SERIES_CONTROL_TABLE,
- "xc430-w150": X_SERIES_CONTROL_TABLE,
-}
-
-MODEL_RESOLUTION = {
- "x_series": 4096,
- "xl330-m077": 4096,
- "xl330-m288": 4096,
- "xl430-w250": 4096,
- "xm430-w350": 4096,
- "xm540-w270": 4096,
- "xc430-w150": 4096,
-}
-
-MODEL_BAUDRATE_TABLE = {
- "x_series": X_SERIES_BAUDRATE_TABLE,
- "xl330-m077": X_SERIES_BAUDRATE_TABLE,
- "xl330-m288": X_SERIES_BAUDRATE_TABLE,
- "xl430-w250": X_SERIES_BAUDRATE_TABLE,
- "xm430-w350": X_SERIES_BAUDRATE_TABLE,
- "xm540-w270": X_SERIES_BAUDRATE_TABLE,
- "xc430-w150": X_SERIES_BAUDRATE_TABLE,
-}
-
-NUM_READ_RETRY = 10
-NUM_WRITE_RETRY = 10
-
-
-def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str]) -> np.ndarray:
- """This function converts degrees to motor steps, used to command motor rotations.
- It assumes a motor achieves a full rotation by going from the -180 degree position to +180.
- The motor resolution (e.g. 4096) corresponds to the number of steps needed to achieve a full rotation.
- """
- resolutions = [MODEL_RESOLUTION[model] for model in models]
- steps = degrees / 180 * np.array(resolutions) / 2
- steps = steps.astype(int)
- return steps
-
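-# Worked example (not part of the original module) for `convert_degrees_to_steps`: with a
-# resolution of 4096 steps per turn, 180 degrees corresponds to half a turn, i.e. 2048 steps.
-def _example_degrees_to_steps():
-    steps = convert_degrees_to_steps(np.array([90.0, 180.0]), ["xl330-m288", "xl330-m288"])
-    assert steps.tolist() == [1024, 2048]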
-
-def convert_to_bytes(value, bytes, mock=False):
- if mock:
- return value
-
- import dynamixel_sdk as dxl
-
- # Note: No need to convert back into unsigned int, since this byte preprocessing
- # already handles it for us.
- if bytes == 1:
- data = [
- dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
- ]
- elif bytes == 2:
- data = [
- dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
- dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
- ]
- elif bytes == 4:
- data = [
- dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
- dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
- dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)),
- dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)),
- ]
- else:
- raise NotImplementedError(
- f"Value of the number of bytes to be sent is expected to be in [1, 2, 4], but "
- f"{bytes} is provided instead."
- )
- return data
-
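-# Worked example (not part of the original module) for `convert_to_bytes`: a 4-byte value is split
-# into little-endian bytes, low word first. For value 2048 (0x800) this yields [0x00, 0x08, 0x00, 0x00].
-# Requires the dynamixel_sdk package, since the function delegates to its byte macros.
-def _example_convert_to_bytes():
-    data = convert_to_bytes(2048, bytes=4)
-    assert data == [0x00, 0x08, 0x00, 0x00]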
-
-def get_group_sync_key(data_name, motor_names):
- group_key = f"{data_name}_" + "_".join(motor_names)
- return group_key
-
-
-def get_result_name(fn_name, data_name, motor_names):
- group_key = get_group_sync_key(data_name, motor_names)
- rslt_name = f"{fn_name}_{group_key}"
- return rslt_name
-
-
-def get_queue_name(fn_name, data_name, motor_names):
- group_key = get_group_sync_key(data_name, motor_names)
- queue_name = f"{fn_name}_{group_key}"
- return queue_name
-
-
-def get_log_name(var_name, fn_name, data_name, motor_names):
- group_key = get_group_sync_key(data_name, motor_names)
- log_name = f"{var_name}_{fn_name}_{group_key}"
- return log_name
-
-
-def assert_same_address(model_ctrl_table, motor_models, data_name):
- all_addr = []
- all_bytes = []
- for model in motor_models:
- addr, bytes = model_ctrl_table[model][data_name]
- all_addr.append(addr)
- all_bytes.append(bytes)
-
- if len(set(all_addr)) != 1:
- raise NotImplementedError(
- f"At least two motor models use a different address for `data_name`='{data_name}' ({list(zip(motor_models, all_addr, strict=False))}). Contact a LeRobot maintainer."
- )
-
- if len(set(all_bytes)) != 1:
- raise NotImplementedError(
- f"At least two motor models use a different bytes representation for `data_name`='{data_name}' ({list(zip(motor_models, all_bytes, strict=False))}). Contact a LeRobot maintainer."
- )
-
-
-class TorqueMode(enum.Enum):
- ENABLED = 1
- DISABLED = 0
-
-
-class DriveMode(enum.Enum):
- NON_INVERTED = 0
- INVERTED = 1
-
-
-class CalibrationMode(enum.Enum):
- # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
- DEGREE = 0
- # Joints with linear motions (like gripper of Aloha) are expressed in nominal range of [0, 100]
- LINEAR = 1
-
-
-class JointOutOfRangeError(Exception):
- def __init__(self, message="Joint is out of range"):
- self.message = message
- super().__init__(self.message)
-
-
-class DynamixelMotorsBus:
- # TODO(rcadene): Add a script to find the motor indices without DynamixelWizard2
- """
- The DynamixelMotorsBus class allows reading from and writing to the attached motors efficiently. It relies on
- the Python Dynamixel SDK to communicate with the motors. For more info, see the [Dynamixel SDK Documentation](https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20).
-
- A DynamixelMotorsBus instance requires a port (e.g. `DynamixelMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
- To find the port, you can run our utility script:
- ```bash
- python lerobot/scripts/find_motors_bus_port.py
- >>> Finding all available ports for the MotorBus.
- >>> ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
- >>> Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
- >>> The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751.
- >>> Reconnect the usb cable.
- ```
-
- Example of usage for 1 motor connected to the bus:
- ```python
- motor_name = "gripper"
- motor_index = 6
- motor_model = "xl330-m288"
-
- motors_bus = DynamixelMotorsBus(
- port="/dev/tty.usbmodem575E0031751",
- motors={motor_name: (motor_index, motor_model)},
- )
- motors_bus.connect()
-
- position = motors_bus.read("Present_Position")
-
- # move by a few motor steps as an example
- few_steps = 30
- motors_bus.write("Goal_Position", position + few_steps)
-
- # when done, consider disconnecting
- motors_bus.disconnect()
- ```
- """
-
- def __init__(
- self,
- port: str,
- motors: dict[str, tuple[int, str]],
- extra_model_control_table: dict[str, list[tuple]] | None = None,
- extra_model_resolution: dict[str, int] | None = None,
- mock=False,
- ):
- self.port = port
- self.motors = motors
- self.mock = mock
-
- self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
- if extra_model_control_table:
- self.model_ctrl_table.update(extra_model_control_table)
-
- self.model_resolution = deepcopy(MODEL_RESOLUTION)
- if extra_model_resolution:
- self.model_resolution.update(extra_model_resolution)
-
- self.port_handler = None
- self.packet_handler = None
- self.calibration = None
- self.is_connected = False
- self.group_readers = {}
- self.group_writers = {}
- self.logs = {}
-
- def connect(self):
- if self.is_connected:
- raise RobotDeviceAlreadyConnectedError(
- f"DynamixelMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice."
- )
-
- if self.mock:
- import tests.mock_dynamixel_sdk as dxl
- else:
- import dynamixel_sdk as dxl
-
- self.port_handler = dxl.PortHandler(self.port)
- self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
-
- try:
- if not self.port_handler.openPort():
- raise OSError(f"Failed to open port '{self.port}'.")
- except Exception:
- traceback.print_exc()
- print(
- "\nTry running `python lerobot/scripts/find_motors_bus_port.py` to make sure you are using the correct port.\n"
- )
- raise
-
- # Allow reading and writing
- self.is_connected = True
-
- self.port_handler.setPacketTimeoutMillis(TIMEOUT_MS)
-
- def reconnect(self):
- if self.mock:
- import tests.mock_dynamixel_sdk as dxl
- else:
- import dynamixel_sdk as dxl
-
- self.port_handler = dxl.PortHandler(self.port)
- self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
-
- if not self.port_handler.openPort():
- raise OSError(f"Failed to open port '{self.port}'.")
-
- self.is_connected = True
-
- def are_motors_configured(self):
- # Only check the motor indices and not baudrate, since if the motor baudrates are incorrect,
- # a ConnectionError will be raised anyway.
- try:
- return (self.motor_indices == self.read("ID")).all()
- except ConnectionError as e:
- print(e)
- return False
-
- def find_motor_indices(self, possible_ids=None, num_retry=2):
- if possible_ids is None:
- possible_ids = range(MAX_ID_RANGE)
-
- indices = []
- for idx in tqdm.tqdm(possible_ids):
- try:
- present_idx = self.read_with_motor_ids(self.motor_models, [idx], "ID", num_retry=num_retry)[0]
- except ConnectionError:
- continue
-
- if idx != present_idx:
- # sanity check
- raise OSError(
- "Motor index used to communicate through the bus is not the same as the one present in the motor memory. The motor memory might be damaged."
- )
- indices.append(idx)
-
- return indices
-
- def set_bus_baudrate(self, baudrate):
- present_bus_baudrate = self.port_handler.getBaudRate()
- if present_bus_baudrate != baudrate:
- print(f"Setting bus baud rate to {baudrate}. Previously {present_bus_baudrate}.")
- self.port_handler.setBaudRate(baudrate)
-
- if self.port_handler.getBaudRate() != baudrate:
- raise OSError("Failed to write bus baud rate.")
-
- @property
- def motor_names(self) -> list[str]:
- return list(self.motors.keys())
-
- @property
- def motor_models(self) -> list[str]:
- return [model for _, model in self.motors.values()]
-
- @property
- def motor_indices(self) -> list[int]:
- return [idx for idx, _ in self.motors.values()]
-
- def set_calibration(self, calibration: dict[str, list]):
- self.calibration = calibration
-
- def apply_calibration_autocorrect(self, values: np.ndarray | list, motor_names: list[str] | None):
- """This function applies the calibration, automatically detects out-of-range errors in motor values and attempts to correct them.
-
- For more info, see docstring of `apply_calibration` and `autocorrect_calibration`.
- """
- try:
- values = self.apply_calibration(values, motor_names)
- except JointOutOfRangeError as e:
- print(e)
- self.autocorrect_calibration(values, motor_names)
- values = self.apply_calibration(values, motor_names)
- return values
-
- def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
- """Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
- a "zero position" at 0 degree.
-
- Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
- rotates more than half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
-
- Joint values are originally in [0, 2**32[ (unsigned int32). Each motor is expected to complete a full rotation
- when given a goal position that is + or - its resolution. For instance, the dynamixel xl330-m077 has a resolution of 4096, and
- at any position in its original range, let's say position 56734, it completes a full rotation clockwise by moving to 60830,
- or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between motors.
- To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
- in the centered nominal degree range ]-180, 180[.
- """
- if motor_names is None:
- motor_names = self.motor_names
-
- # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
- values = values.astype(np.float32)
-
- for i, name in enumerate(motor_names):
- calib_idx = self.calibration["motor_names"].index(name)
- calib_mode = self.calibration["calib_mode"][calib_idx]
-
- if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
- drive_mode = self.calibration["drive_mode"][calib_idx]
- homing_offset = self.calibration["homing_offset"][calib_idx]
- _, model = self.motors[name]
- resolution = self.model_resolution[model]
-
- # Update direction of rotation of the motor to match between leader and follower.
- # In fact, the motor of the leader for a given joint can be assembled in an
- # opposite direction in terms of rotation compared to the motor of the follower on the same joint.
- if drive_mode:
- values[i] *= -1
-
- # Convert from range [-2**31, 2**31] to
- # nominal range [-resolution//2, resolution//2] (e.g. [-2048, 2048])
- values[i] += homing_offset
-
- # Convert from range [-resolution//2, resolution//2] to
- # universal float32 centered degree range [-180, 180]
- # (e.g. 2048 / (4096 // 2) * 180 = 180)
- values[i] = values[i] / (resolution // 2) * HALF_TURN_DEGREE
-
- if (values[i] < LOWER_BOUND_DEGREE) or (values[i] > UPPER_BOUND_DEGREE):
- raise JointOutOfRangeError(
- f"Wrong motor position range detected for {name}. "
- f"Expected to be in nominal range of [-{HALF_TURN_DEGREE}, {HALF_TURN_DEGREE}] degrees (a full rotation), "
- f"with a maximum range of [{LOWER_BOUND_DEGREE}, {UPPER_BOUND_DEGREE}] degrees to account for joints that can rotate a bit more, "
- f"but present value is {values[i]} degree. "
- "This might be due to a cable connection issue creating an artificial 360 degrees jump in motor values. "
- "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
- )
-
- elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
- start_pos = self.calibration["start_pos"][calib_idx]
- end_pos = self.calibration["end_pos"][calib_idx]
-
- # Rescale the present position to a nominal range [0, 100] %,
- # useful for joints with linear motions like Aloha gripper
- values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
-
- if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
- raise JointOutOfRangeError(
- f"Wrong motor position range detected for {name}. "
- f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
- f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
- f"but present value is {values[i]} %. "
- "This might be due to a cable connection issue creating an artificial jump in motor values. "
- "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
- )
-
- return values
-
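- # Worked example (not part of the original code) of the DEGREE branch above, with a resolution
- # of 4096 and a homing offset of -2048: a raw position of 3072 becomes
- # (3072 - 2048) / (4096 // 2) * 180 = 90.0 degrees, i.e. a quarter turn from the zero position.
-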
- def autocorrect_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
- """This function automatically detects issues with motor values after calibration, and corrects them.
-
- Some motors might have values outside of expected maximum bounds after calibration.
- For instance, for a joint in degree, its value can be outside [-270, 270] degrees, which is totally unexpected given
- a nominal range of [-180, 180] degrees, which represents half a turn to the left or right starting from zero position.
-
- Known issues:
- #1: Motor value randomly shifts by a full turn, caused by hardware/connection errors.
- #2: Motor internal homing offset is shifted by a full turn, caused by using the default calibration (e.g. Aloha).
- #3: Motor internal homing offset is shifted by less or more than a full turn, caused by using the default calibration
- or by human error during manual calibration.
-
- Issues #1 and #2 can be solved by shifting the calibration homing offset by a full turn.
- Issue #3 will be visually detected by the user and potentially captured by the safety feature `max_relative_target`,
- which will slow down the motor and raise an error asking to recalibrate. Manually recalibrating will solve the issue.
-
- Note: A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
- """
- if motor_names is None:
- motor_names = self.motor_names
-
- # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
- values = values.astype(np.float32)
-
- for i, name in enumerate(motor_names):
- calib_idx = self.calibration["motor_names"].index(name)
- calib_mode = self.calibration["calib_mode"][calib_idx]
-
- if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
- drive_mode = self.calibration["drive_mode"][calib_idx]
- homing_offset = self.calibration["homing_offset"][calib_idx]
- _, model = self.motors[name]
- resolution = self.model_resolution[model]
-
- # Update direction of rotation of the motor to match between leader and follower.
- # In fact, the motor of the leader for a given joint can be assembled in an
- # opposite direction in terms of rotation compared to the motor of the follower on the same joint.
- if drive_mode:
- values[i] *= -1
-
- # Convert from initial range to range [-180, 180] degrees
- calib_val = (values[i] + homing_offset) / (resolution // 2) * HALF_TURN_DEGREE
- in_range = (calib_val > LOWER_BOUND_DEGREE) and (calib_val < UPPER_BOUND_DEGREE)
-
- # Solve this inequality to find the factor to shift the range into [-180, 180] degrees
- # values[i] = (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE
- # - HALF_TURN_DEGREE <= (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE <= HALF_TURN_DEGREE
- # (- (resolution // 2) - values[i] - homing_offset) / resolution <= factor <= ((resolution // 2) - values[i] - homing_offset) / resolution
- low_factor = (-(resolution // 2) - values[i] - homing_offset) / resolution
- upp_factor = ((resolution // 2) - values[i] - homing_offset) / resolution
-
- elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
- start_pos = self.calibration["start_pos"][calib_idx]
- end_pos = self.calibration["end_pos"][calib_idx]
-
- # Convert from initial range to range [0, 100] in %
- calib_val = (values[i] - start_pos) / (end_pos - start_pos) * 100
- in_range = (calib_val > LOWER_BOUND_LINEAR) and (calib_val < UPPER_BOUND_LINEAR)
-
- # Solve this inequality to find the factor to shift the range into [0, 100] %
- # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos + resolution * factor - start_pos - resolution * factor) * 100
- # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100
- # 0 <= (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100 <= 100
- # (start_pos - values[i]) / resolution <= factor <= (end_pos - values[i]) / resolution
- low_factor = (start_pos - values[i]) / resolution
- upp_factor = (end_pos - values[i]) / resolution
-
- if not in_range:
- # Get first integer between the two bounds
- if low_factor < upp_factor:
- factor = math.ceil(low_factor)
-
- if factor > upp_factor:
- raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")
- else:
- factor = math.ceil(upp_factor)
-
- if factor > low_factor:
- raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")
-
- if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
- out_of_range_str = f"{calib_val} degrees"
- in_range_str = f"[{LOWER_BOUND_DEGREE}, {UPPER_BOUND_DEGREE}] degrees"
- elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
- out_of_range_str = f"{calib_val} %"
- in_range_str = f"[{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] %"
-
- logging.warning(
- f"Auto-correct calibration of motor '{name}' by shifting value by {abs(factor)} full turns, "
- f"from '{out_of_range_str}' to '{in_range_str}'."
- )
-
- # A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
- self.calibration["homing_offset"][calib_idx] += resolution * factor
-
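- # Worked example (not part of the original code) of the auto-correction above: with a resolution
- # of 4096, a homing offset of -2048 and a raw value of 7168, the calibrated value is
- # (7168 - 2048) / 2048 * 180 = 450 degrees, outside the [-270, 270] bound. Shifting the homing
- # offset by one full turn (factor = -1, i.e. -4096 steps) brings it back to 450 - 360 = 90 degrees.
-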
- def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
- """Inverse of `apply_calibration`."""
- if motor_names is None:
- motor_names = self.motor_names
-
- for i, name in enumerate(motor_names):
- calib_idx = self.calibration["motor_names"].index(name)
- calib_mode = self.calibration["calib_mode"][calib_idx]
-
- if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
- drive_mode = self.calibration["drive_mode"][calib_idx]
- homing_offset = self.calibration["homing_offset"][calib_idx]
- _, model = self.motors[name]
- resolution = self.model_resolution[model]
-
- # Convert from nominal 0-centered degree range [-180, 180] to
- # 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096)
- values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2)
-
- # Subtract the homing offsets to come back to actual motor range of values
- # which can be arbitrary.
- values[i] -= homing_offset
-
- # Remove drive mode, which is the rotation direction of the motor, to come back to
- # actual motor rotation direction which can be arbitrary.
- if drive_mode:
- values[i] *= -1
-
- elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
- start_pos = self.calibration["start_pos"][calib_idx]
- end_pos = self.calibration["end_pos"][calib_idx]
-
- # Convert from nominal linear range of [0, 100] % to
- # actual motor range of values which can be arbitrary.
- values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
-
- values = np.round(values).astype(np.int32)
- return values
-
- def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY):
- if self.mock:
- import tests.mock_dynamixel_sdk as dxl
- else:
- import dynamixel_sdk as dxl
-
- return_list = True
- if not isinstance(motor_ids, list):
- return_list = False
- motor_ids = [motor_ids]
-
- assert_same_address(self.model_ctrl_table, self.motor_models, data_name)
- addr, bytes = self.model_ctrl_table[motor_models[0]][data_name]
- group = dxl.GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes)
- for idx in motor_ids:
- group.addParam(idx)
-
- for _ in range(num_retry):
- comm = group.txRxPacket()
- if comm == dxl.COMM_SUCCESS:
- break
-
- if comm != dxl.COMM_SUCCESS:
- raise ConnectionError(
- f"Read failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: "
- f"{self.packet_handler.getTxRxResult(comm)}"
- )
-
- values = []
- for idx in motor_ids:
- value = group.getData(idx, addr, bytes)
- values.append(value)
-
- if return_list:
- return values
- else:
- return values[0]
-
- def read(self, data_name, motor_names: str | list[str] | None = None):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"DynamixelMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
- )
-
- start_time = time.perf_counter()
-
- if self.mock:
- import tests.mock_dynamixel_sdk as dxl
- else:
- import dynamixel_sdk as dxl
-
- if motor_names is None:
- motor_names = self.motor_names
-
- if isinstance(motor_names, str):
- motor_names = [motor_names]
-
- motor_ids = []
- models = []
- for name in motor_names:
- motor_idx, model = self.motors[name]
- motor_ids.append(motor_idx)
- models.append(model)
-
- assert_same_address(self.model_ctrl_table, models, data_name)
- addr, bytes = self.model_ctrl_table[model][data_name]
- group_key = get_group_sync_key(data_name, motor_names)
-
- if group_key not in self.group_readers:
- # create new group reader
- self.group_readers[group_key] = dxl.GroupSyncRead(
- self.port_handler, self.packet_handler, addr, bytes
- )
- for idx in motor_ids:
- self.group_readers[group_key].addParam(idx)
-
- for _ in range(NUM_READ_RETRY):
- comm = self.group_readers[group_key].txRxPacket()
- if comm == dxl.COMM_SUCCESS:
- break
-
- if comm != dxl.COMM_SUCCESS:
- raise ConnectionError(
- f"Read failed due to communication error on port {self.port} for group_key {group_key}: "
- f"{self.packet_handler.getTxRxResult(comm)}"
- )
-
- values = []
- for idx in motor_ids:
- value = self.group_readers[group_key].getData(idx, addr, bytes)
- values.append(value)
-
- values = np.array(values)
-
- # Convert to signed int to use range [-2048, 2048] for our motor positions.
- if data_name in CONVERT_UINT32_TO_INT32_REQUIRED:
- values = values.astype(np.int32)
-
- if data_name in CALIBRATION_REQUIRED and self.calibration is not None:
- values = self.apply_calibration_autocorrect(values, motor_names)
-
- # log the number of seconds it took to read the data from the motors
- delta_ts_name = get_log_name("delta_timestamp_s", "read", data_name, motor_names)
- self.logs[delta_ts_name] = time.perf_counter() - start_time
-
- # log the utc time at which the data was received
- ts_utc_name = get_log_name("timestamp_utc", "read", data_name, motor_names)
- self.logs[ts_utc_name] = capture_timestamp_utc()
-
- return values
-
- def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY):
- if self.mock:
- import tests.mock_dynamixel_sdk as dxl
- else:
- import dynamixel_sdk as dxl
-
- if not isinstance(motor_ids, list):
- motor_ids = [motor_ids]
- if not isinstance(values, list):
- values = [values]
-
- assert_same_address(self.model_ctrl_table, motor_models, data_name)
- addr, bytes = self.model_ctrl_table[motor_models[0]][data_name]
- group = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes)
- for idx, value in zip(motor_ids, values, strict=True):
- data = convert_to_bytes(value, bytes, self.mock)
- group.addParam(idx, data)
-
- for _ in range(num_retry):
- comm = group.txPacket()
- if comm == dxl.COMM_SUCCESS:
- break
-
- if comm != dxl.COMM_SUCCESS:
- raise ConnectionError(
- f"Write failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: "
- f"{self.packet_handler.getTxRxResult(comm)}"
- )
-
- def write(self, data_name, values: int | float | np.ndarray, motor_names: str | list[str] | None = None):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"DynamixelMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
- )
-
- start_time = time.perf_counter()
-
- if self.mock:
- import tests.mock_dynamixel_sdk as dxl
- else:
- import dynamixel_sdk as dxl
-
- if motor_names is None:
- motor_names = self.motor_names
-
- if isinstance(motor_names, str):
- motor_names = [motor_names]
-
- if isinstance(values, (int, float, np.integer)):
- values = [int(values)] * len(motor_names)
-
- values = np.array(values)
-
- motor_ids = []
- models = []
- for name in motor_names:
- motor_idx, model = self.motors[name]
- motor_ids.append(motor_idx)
- models.append(model)
-
- if data_name in CALIBRATION_REQUIRED and self.calibration is not None:
- values = self.revert_calibration(values, motor_names)
-
- values = values.tolist()
-
- assert_same_address(self.model_ctrl_table, models, data_name)
- addr, bytes = self.model_ctrl_table[model][data_name]
- group_key = get_group_sync_key(data_name, motor_names)
-
- init_group = group_key not in self.group_writers
- if init_group:
- self.group_writers[group_key] = dxl.GroupSyncWrite(
- self.port_handler, self.packet_handler, addr, bytes
- )
-
- for idx, value in zip(motor_ids, values, strict=True):
- data = convert_to_bytes(value, bytes, self.mock)
- if init_group:
- self.group_writers[group_key].addParam(idx, data)
- else:
- self.group_writers[group_key].changeParam(idx, data)
-
- comm = self.group_writers[group_key].txPacket()
- if comm != dxl.COMM_SUCCESS:
- raise ConnectionError(
- f"Write failed due to communication error on port {self.port} for group_key {group_key}: "
- f"{self.packet_handler.getTxRxResult(comm)}"
- )
-
- # log the number of seconds it took to write the data to the motors
- delta_ts_name = get_log_name("delta_timestamp_s", "write", data_name, motor_names)
- self.logs[delta_ts_name] = time.perf_counter() - start_time
-
- # TODO(rcadene): should we log the time before sending the write command?
- # log the utc time when the write has been completed
- ts_utc_name = get_log_name("timestamp_utc", "write", data_name, motor_names)
- self.logs[ts_utc_name] = capture_timestamp_utc()
-
- def disconnect(self):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"DynamixelMotorsBus({self.port}) is not connected. Try running `motors_bus.connect()` first."
- )
-
- if self.port_handler is not None:
- self.port_handler.closePort()
- self.port_handler = None
-
- self.packet_handler = None
- self.group_readers = {}
- self.group_writers = {}
- self.is_connected = False
-
- def __del__(self):
- if getattr(self, "is_connected", False):
- self.disconnect()
diff --git a/lerobot/common/robot_devices/motors/feetech.py b/lerobot/common/robot_devices/motors/feetech.py
deleted file mode 100644
index 0d5480f7a8..0000000000
--- a/lerobot/common/robot_devices/motors/feetech.py
+++ /dev/null
@@ -1,887 +0,0 @@
-import enum
-import logging
-import math
-import time
-import traceback
-from copy import deepcopy
-
-import numpy as np
-import tqdm
-
-from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
-from lerobot.common.utils.utils import capture_timestamp_utc
-
-PROTOCOL_VERSION = 0
-BAUDRATE = 1_000_000
-TIMEOUT_MS = 1000
-
-MAX_ID_RANGE = 252
-
-# The following bounds define the lower and upper joints range (after calibration).
-# For joints in degree (i.e. revolute joints), their nominal range is [-180, 180] degrees
-# which corresponds to a half rotation on the left and half rotation on the right.
-# Some joints might require higher range, so we allow up to [-270, 270] degrees until
-# an error is raised.
-LOWER_BOUND_DEGREE = -270
-UPPER_BOUND_DEGREE = 270
-# For joints in percentage (i.e. joints that move linearly like the prismatic joint of a gripper),
-# their nominal range is [0, 100] %. For instance, for Aloha gripper, 0% is fully
- # closed, and 100% is fully open. To account for slight calibration issues, we allow up to
-# [-10, 110] until an error is raised.
-LOWER_BOUND_LINEAR = -10
-UPPER_BOUND_LINEAR = 110
-
-HALF_TURN_DEGREE = 180
-
-
-# See this link for STS3215 Memory Table:
-# https://docs.google.com/spreadsheets/d/1GVs7W1VS1PqdhA1nW-abeyAHhTUxKUdR/edit?usp=sharing&ouid=116566590112741600240&rtpof=true&sd=true
-# data_name: (address, size_byte)
-SCS_SERIES_CONTROL_TABLE = {
- "Model": (3, 2),
- "ID": (5, 1),
- "Baud_Rate": (6, 1),
- "Return_Delay": (7, 1),
- "Response_Status_Level": (8, 1),
- "Min_Angle_Limit": (9, 2),
- "Max_Angle_Limit": (11, 2),
- "Max_Temperature_Limit": (13, 1),
- "Max_Voltage_Limit": (14, 1),
- "Min_Voltage_Limit": (15, 1),
- "Max_Torque_Limit": (16, 2),
- "Phase": (18, 1),
- "Unloading_Condition": (19, 1),
- "LED_Alarm_Condition": (20, 1),
- "P_Coefficient": (21, 1),
- "D_Coefficient": (22, 1),
- "I_Coefficient": (23, 1),
- "Minimum_Startup_Force": (24, 2),
- "CW_Dead_Zone": (26, 1),
- "CCW_Dead_Zone": (27, 1),
- "Protection_Current": (28, 2),
- "Angular_Resolution": (30, 1),
- "Offset": (31, 2),
- "Mode": (33, 1),
- "Protective_Torque": (34, 1),
- "Protection_Time": (35, 1),
- "Overload_Torque": (36, 1),
- "Speed_closed_loop_P_proportional_coefficient": (37, 1),
- "Over_Current_Protection_Time": (38, 1),
- "Velocity_closed_loop_I_integral_coefficient": (39, 1),
- "Torque_Enable": (40, 1),
- "Acceleration": (41, 1),
- "Goal_Position": (42, 2),
- "Goal_Time": (44, 2),
- "Goal_Speed": (46, 2),
- "Torque_Limit": (48, 2),
- "Lock": (55, 1),
- "Present_Position": (56, 2),
- "Present_Speed": (58, 2),
- "Present_Load": (60, 2),
- "Present_Voltage": (62, 1),
- "Present_Temperature": (63, 1),
- "Status": (65, 1),
- "Moving": (66, 1),
- "Present_Current": (69, 2),
- # Not in the Memory Table
- "Maximum_Acceleration": (85, 2),
-}
-
-SCS_SERIES_BAUDRATE_TABLE = {
- 0: 1_000_000,
- 1: 500_000,
- 2: 250_000,
- 3: 128_000,
- 4: 115_200,
- 5: 57_600,
- 6: 38_400,
- 7: 19_200,
-}
-
-CALIBRATION_REQUIRED = ["Goal_Position", "Present_Position"]
-CONVERT_UINT32_TO_INT32_REQUIRED = ["Goal_Position", "Present_Position"]
-
-
-MODEL_CONTROL_TABLE = {
- "scs_series": SCS_SERIES_CONTROL_TABLE,
- "sts3215": SCS_SERIES_CONTROL_TABLE,
-}
-
-MODEL_RESOLUTION = {
- "scs_series": 4096,
- "sts3215": 4096,
-}
-
-MODEL_BAUDRATE_TABLE = {
- "scs_series": SCS_SERIES_BAUDRATE_TABLE,
- "sts3215": SCS_SERIES_BAUDRATE_TABLE,
-}
-
- # A higher number of retries is needed for Feetech motors compared to Dynamixel motors.
-NUM_READ_RETRY = 20
-NUM_WRITE_RETRY = 20
-
-
-def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str]) -> np.ndarray:
- """This function converts the degree range to the step range for indicating motors rotation.
- It assumes a motor achieves a full rotation by going from -180 degree position to +180.
- The motor resolution (e.g. 4096) corresponds to the number of steps needed to achieve a full rotation.
- """
- resolutions = [MODEL_RESOLUTION[model] for model in models]
- steps = degrees / 180 * np.array(resolutions) / 2
- steps = steps.astype(int)
- return steps
-
-
-def convert_to_bytes(value, bytes, mock=False):
- if mock:
- return value
-
- import scservo_sdk as scs
-
- # Note: No need to convert back into unsigned int, since this byte preprocessing
- # already handles it for us.
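- # For instance (illustrative): with bytes=2 and value=1025 (0x0401), the payload is [0x01, 0x04],
- # i.e. the low byte is sent first, followed by the high byte.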
- if bytes == 1:
- data = [
- scs.SCS_LOBYTE(scs.SCS_LOWORD(value)),
- ]
- elif bytes == 2:
- data = [
- scs.SCS_LOBYTE(scs.SCS_LOWORD(value)),
- scs.SCS_HIBYTE(scs.SCS_LOWORD(value)),
- ]
- elif bytes == 4:
- data = [
- scs.SCS_LOBYTE(scs.SCS_LOWORD(value)),
- scs.SCS_HIBYTE(scs.SCS_LOWORD(value)),
- scs.SCS_LOBYTE(scs.SCS_HIWORD(value)),
- scs.SCS_HIBYTE(scs.SCS_HIWORD(value)),
- ]
- else:
- raise NotImplementedError(
- f"Value of the number of bytes to be sent is expected to be in [1, 2, 4], but "
- f"{bytes} is provided instead."
- )
- return data
-
-
-def get_group_sync_key(data_name, motor_names):
- group_key = f"{data_name}_" + "_".join(motor_names)
- return group_key
-
-
-def get_result_name(fn_name, data_name, motor_names):
- group_key = get_group_sync_key(data_name, motor_names)
- rslt_name = f"{fn_name}_{group_key}"
- return rslt_name
-
-
-def get_queue_name(fn_name, data_name, motor_names):
- group_key = get_group_sync_key(data_name, motor_names)
- queue_name = f"{fn_name}_{group_key}"
- return queue_name
-
-
-def get_log_name(var_name, fn_name, data_name, motor_names):
- group_key = get_group_sync_key(data_name, motor_names)
- log_name = f"{var_name}_{fn_name}_{group_key}"
- return log_name
-
-
-def assert_same_address(model_ctrl_table, motor_models, data_name):
- all_addr = []
- all_bytes = []
- for model in motor_models:
- addr, bytes = model_ctrl_table[model][data_name]
- all_addr.append(addr)
- all_bytes.append(bytes)
-
- if len(set(all_addr)) != 1:
- raise NotImplementedError(
- f"At least two motor models use a different address for `data_name`='{data_name}' ({list(zip(motor_models, all_addr, strict=False))}). Contact a LeRobot maintainer."
- )
-
- if len(set(all_bytes)) != 1:
- raise NotImplementedError(
- f"At least two motor models use a different bytes representation for `data_name`='{data_name}' ({list(zip(motor_models, all_bytes, strict=False))}). Contact a LeRobot maintainer."
- )
-
-
-class TorqueMode(enum.Enum):
- ENABLED = 1
- DISABLED = 0
-
-
-class DriveMode(enum.Enum):
- NON_INVERTED = 0
- INVERTED = 1
-
-
-class CalibrationMode(enum.Enum):
- # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
- DEGREE = 0
- # Joints with linear motions (like the gripper of Aloha) are expressed in a nominal range of [0, 100]
- LINEAR = 1
-
-
-class JointOutOfRangeError(Exception):
- def __init__(self, message="Joint is out of range"):
- self.message = message
- super().__init__(self.message)
-
-
-class FeetechMotorsBus:
- """
- The FeetechMotorsBus class allows reading from and writing to the attached motors efficiently. It relies on
- the python feetech sdk to communicate with the motors. For more info, see the [feetech SDK Documentation](https://emanual.robotis.com/docs/en/software/feetech/feetech_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20).
-
- A FeetechMotorsBus instance requires a port (e.g. `FeetechMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
- To find the port, you can run our utility script:
- ```bash
- python lerobot/scripts/find_motors_bus_port.py
- >>> Finding all available ports for the MotorsBus.
- >>> ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
- >>> Remove the usb cable from your FeetechMotorsBus and press Enter when done.
- >>> The port of this FeetechMotorsBus is /dev/tty.usbmodem575E0031751.
- >>> Reconnect the usb cable.
- ```
-
- Example of usage for 1 motor connected to the bus:
- ```python
- motor_name = "gripper"
- motor_index = 6
- motor_model = "sts3215"
-
- motors_bus = FeetechMotorsBus(
- port="/dev/tty.usbmodem575E0031751",
- motors={motor_name: (motor_index, motor_model)},
- )
- motors_bus.connect()
-
- position = motors_bus.read("Present_Position")
-
- # move by a few motor steps as an example
- few_steps = 30
- motors_bus.write("Goal_Position", position + few_steps)
-
- # when done, consider disconnecting
- motors_bus.disconnect()
- ```
- """
-
- def __init__(
- self,
- port: str,
- motors: dict[str, tuple[int, str]],
- extra_model_control_table: dict[str, list[tuple]] | None = None,
- extra_model_resolution: dict[str, int] | None = None,
- mock=False,
- ):
- self.port = port
- self.motors = motors
- self.mock = mock
-
- self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
- if extra_model_control_table:
- self.model_ctrl_table.update(extra_model_control_table)
-
- self.model_resolution = deepcopy(MODEL_RESOLUTION)
- if extra_model_resolution:
- self.model_resolution.update(extra_model_resolution)
-
- self.port_handler = None
- self.packet_handler = None
- self.calibration = None
- self.is_connected = False
- self.group_readers = {}
- self.group_writers = {}
- self.logs = {}
-
- self.track_positions = {}
-
- def connect(self):
- if self.is_connected:
- raise RobotDeviceAlreadyConnectedError(
- f"FeetechMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice."
- )
-
- if self.mock:
- import tests.mock_scservo_sdk as scs
- else:
- import scservo_sdk as scs
-
- self.port_handler = scs.PortHandler(self.port)
- self.packet_handler = scs.PacketHandler(PROTOCOL_VERSION)
-
- try:
- if not self.port_handler.openPort():
- raise OSError(f"Failed to open port '{self.port}'.")
- except Exception:
- traceback.print_exc()
- print(
- "\nTry running `python lerobot/scripts/find_motors_bus_port.py` to make sure you are using the correct port.\n"
- )
- raise
-
- # Allow reading and writing
- self.is_connected = True
-
- self.port_handler.setPacketTimeoutMillis(TIMEOUT_MS)
-
- def reconnect(self):
- if self.mock:
- import tests.mock_scservo_sdk as scs
- else:
- import scservo_sdk as scs
-
- self.port_handler = scs.PortHandler(self.port)
- self.packet_handler = scs.PacketHandler(PROTOCOL_VERSION)
-
- if not self.port_handler.openPort():
- raise OSError(f"Failed to open port '{self.port}'.")
-
- self.is_connected = True
-
- def are_motors_configured(self):
- # Only check the motor indices and not baudrate, since if the motor baudrates are incorrect,
- # a ConnectionError will be raised anyway.
- try:
- return (self.motor_indices == self.read("ID")).all()
- except ConnectionError as e:
- print(e)
- return False
-
- def find_motor_indices(self, possible_ids=None, num_retry=2):
- if possible_ids is None:
- possible_ids = range(MAX_ID_RANGE)
-
- indices = []
- for idx in tqdm.tqdm(possible_ids):
- try:
- present_idx = self.read_with_motor_ids(self.motor_models, [idx], "ID", num_retry=num_retry)[0]
- except ConnectionError:
- continue
-
- if idx != present_idx:
- # sanity check
- raise OSError(
- "Motor index used to communicate through the bus is not the same as the one present in the motor memory. The motor memory might be damaged."
- )
- indices.append(idx)
-
- return indices
-
- def set_bus_baudrate(self, baudrate):
- present_bus_baudrate = self.port_handler.getBaudRate()
- if present_bus_baudrate != baudrate:
- print(f"Setting bus baud rate to {baudrate}. Previously {present_bus_baudrate}.")
- self.port_handler.setBaudRate(baudrate)
-
- if self.port_handler.getBaudRate() != baudrate:
- raise OSError("Failed to write bus baud rate.")
-
- @property
- def motor_names(self) -> list[str]:
- return list(self.motors.keys())
-
- @property
- def motor_models(self) -> list[str]:
- return [model for _, model in self.motors.values()]
-
- @property
- def motor_indices(self) -> list[int]:
- return [idx for idx, _ in self.motors.values()]
-
- def set_calibration(self, calibration: dict[str, list]):
- self.calibration = calibration
-
- def apply_calibration_autocorrect(self, values: np.ndarray | list, motor_names: list[str] | None):
- """This function apply the calibration, automatically detects out of range errors for motors values and attempt to correct.
-
- For more info, see docstring of `apply_calibration` and `autocorrect_calibration`.
- """
- try:
- values = self.apply_calibration(values, motor_names)
- except JointOutOfRangeError as e:
- print(e)
- self.autocorrect_calibration(values, motor_names)
- values = self.apply_calibration(values, motor_names)
- return values
-
- def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
- """Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
- a "zero position" at 0 degree.
-
- Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
- rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
-
- Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
- when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
- at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
- or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
- To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
- in the centered nominal degree range ]-180, 180[.
- """
- if motor_names is None:
- motor_names = self.motor_names
-
- # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
- values = values.astype(np.float32)
-
- for i, name in enumerate(motor_names):
- calib_idx = self.calibration["motor_names"].index(name)
- calib_mode = self.calibration["calib_mode"][calib_idx]
-
- if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
- drive_mode = self.calibration["drive_mode"][calib_idx]
- homing_offset = self.calibration["homing_offset"][calib_idx]
- _, model = self.motors[name]
- resolution = self.model_resolution[model]
-
- # Update direction of rotation of the motor to match between leader and follower.
- # In fact, the motor of the leader for a given joint can be assembled in an
- # opposite direction in term of rotation than the motor of the follower on the same joint.
- if drive_mode:
- values[i] *= -1
-
- # Convert from range [-2**31, 2**31[ to
- # nominal range ]-resolution, resolution[ (e.g. ]-2048, 2048[)
- values[i] += homing_offset
-
- # Convert from range ]-resolution, resolution[ to
- # universal float32 centered degree range ]-180, 180[
- values[i] = values[i] / (resolution // 2) * HALF_TURN_DEGREE
-
- if (values[i] < LOWER_BOUND_DEGREE) or (values[i] > UPPER_BOUND_DEGREE):
- raise JointOutOfRangeError(
- f"Wrong motor position range detected for {name}. "
- f"Expected to be in nominal range of [-{HALF_TURN_DEGREE}, {HALF_TURN_DEGREE}] degrees (a full rotation), "
- f"with a maximum range of [{LOWER_BOUND_DEGREE}, {UPPER_BOUND_DEGREE}] degrees to account for joints that can rotate a bit more, "
- f"but present value is {values[i]} degree. "
- "This might be due to a cable connection issue creating an artificial 360 degrees jump in motor values. "
- "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
- )
-
- elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
- start_pos = self.calibration["start_pos"][calib_idx]
- end_pos = self.calibration["end_pos"][calib_idx]
-
- # Rescale the present position to a nominal range [0, 100] %,
- # useful for joints with linear motions like Aloha gripper
- values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
-
- if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
- raise JointOutOfRangeError(
- f"Wrong motor position range detected for {name}. "
- f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
- f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
- f"but present value is {values[i]} %. "
- "This might be due to a cable connection issue creating an artificial jump in motor values. "
- "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
- )
-
- return values
-
- def autocorrect_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
- """This function automatically detects issues with values of motors after calibration, and correct for these issues.
-
- Some motors might have values outside of the expected maximum bounds after calibration.
- For instance, for a joint in degrees, its value can be outside [-270, 270] degrees, which is totally unexpected given
- a nominal range of [-180, 180] degrees, which represents half a turn to the left or right starting from the zero position.
-
- Known issues:
- #1: The motor value randomly shifts by a full turn, caused by hardware/connection errors.
- #2: The motor internal homing offset is shifted by a full turn, caused by using a default calibration (e.g. Aloha).
- #3: The motor internal homing offset is shifted by less or more than a full turn, caused by using a default calibration
- or by human error during manual calibration.
-
- Issues #1 and #2 can be solved by shifting the calibration homing offset by a full turn.
- Issue #3 will be visually detected by the user and potentially caught by the safety feature `max_relative_target`,
- which will slow down the motor and raise an error asking to recalibrate. Manual recalibration will solve the issue.
-
- Note: A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
- """
- if motor_names is None:
- motor_names = self.motor_names
-
- # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
- values = values.astype(np.float32)
-
- for i, name in enumerate(motor_names):
- calib_idx = self.calibration["motor_names"].index(name)
- calib_mode = self.calibration["calib_mode"][calib_idx]
-
- if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
- drive_mode = self.calibration["drive_mode"][calib_idx]
- homing_offset = self.calibration["homing_offset"][calib_idx]
- _, model = self.motors[name]
- resolution = self.model_resolution[model]
-
- if drive_mode:
- values[i] *= -1
-
- # Convert from initial range to range [-180, 180] degrees
- calib_val = (values[i] + homing_offset) / (resolution // 2) * HALF_TURN_DEGREE
- in_range = (calib_val > LOWER_BOUND_DEGREE) and (calib_val < UPPER_BOUND_DEGREE)
-
- # Solve this inequality to find the factor to shift the range into [-180, 180] degrees
- # values[i] = (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE
- # - HALF_TURN_DEGREE <= (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE <= HALF_TURN_DEGREE
- # (- HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset) / resolution <= factor <= (HALF_TURN_DEGREE / 180 * (resolution // 2) - values[i] - homing_offset) / resolution
- low_factor = (
- -HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset
- ) / resolution
- upp_factor = (
- HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset
- ) / resolution
-
- elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
- start_pos = self.calibration["start_pos"][calib_idx]
- end_pos = self.calibration["end_pos"][calib_idx]
-
- # Convert from initial range to range [0, 100] in %
- calib_val = (values[i] - start_pos) / (end_pos - start_pos) * 100
- in_range = (calib_val > LOWER_BOUND_LINEAR) and (calib_val < UPPER_BOUND_LINEAR)
-
- # Solve this inequality to find the factor to shift the range into [0, 100] %
- # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos + resolution * factor - start_pos - resolution * factor) * 100
- # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100
- # 0 <= (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100 <= 100
- # (start_pos - values[i]) / resolution <= factor <= (end_pos - values[i]) / resolution
- low_factor = (start_pos - values[i]) / resolution
- upp_factor = (end_pos - values[i]) / resolution
-
- if not in_range:
- # Get first integer between the two bounds
- if low_factor < upp_factor:
- factor = math.ceil(low_factor)
-
- if factor > upp_factor:
- raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")
- else:
- factor = math.ceil(upp_factor)
-
- if factor > low_factor:
- raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")
-
- if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
- out_of_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees"
- in_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees"
- elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
- out_of_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %"
- in_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %"
-
- logging.warning(
- f"Auto-correct calibration of motor '{name}' by shifting value by {abs(factor)} full turns, "
- f"from '{out_of_range_str}' to '{in_range_str}'."
- )
-
- # A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
- self.calibration["homing_offset"][calib_idx] += resolution * factor
-
- def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
- """Inverse of `apply_calibration`."""
- if motor_names is None:
- motor_names = self.motor_names
-
- for i, name in enumerate(motor_names):
- calib_idx = self.calibration["motor_names"].index(name)
- calib_mode = self.calibration["calib_mode"][calib_idx]
-
- if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
- drive_mode = self.calibration["drive_mode"][calib_idx]
- homing_offset = self.calibration["homing_offset"][calib_idx]
- _, model = self.motors[name]
- resolution = self.model_resolution[model]
-
- # Convert from nominal 0-centered degree range [-180, 180] to
- # 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096)
- values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2)
-
- # Substract the homing offsets to come back to actual motor range of values
- # which can be arbitrary.
- values[i] -= homing_offset
-
- # Remove drive mode, which is the rotation direction of the motor, to come back to
- # actual motor rotation direction which can be arbitrary.
- if drive_mode:
- values[i] *= -1
-
- elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
- start_pos = self.calibration["start_pos"][calib_idx]
- end_pos = self.calibration["end_pos"][calib_idx]
-
- # Convert from nominal linear range of [0, 100] % to
- # actual motor range of values which can be arbitrary.
- values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
-
- values = np.round(values).astype(np.int32)
- return values
-
- def avoid_rotation_reset(self, values, motor_names, data_name):
- if data_name not in self.track_positions:
- self.track_positions[data_name] = {
- "prev": [None] * len(self.motor_names),
- # Assume False at initialization
- "below_zero": [False] * len(self.motor_names),
- "above_max": [False] * len(self.motor_names),
- }
-
- track = self.track_positions[data_name]
-
- if motor_names is None:
- motor_names = self.motor_names
-
- for i, name in enumerate(motor_names):
- idx = self.motor_names.index(name)
-
- if track["prev"][idx] is None:
- track["prev"][idx] = values[i]
- continue
-
- # Detect that a full rotation occurred
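- # Illustrative example: if the previous reading was 4090 and the new raw reading is 5, the jump is
- # 4085 > 2048, so 4096 is added below and the tracked value becomes 4101.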
- if abs(track["prev"][idx] - values[i]) > 2048:
- # Position went below 0 and got reset to 4095
- if track["prev"][idx] < values[i]:
- # So we subtract a full rotation to make the value negative
- values[i] -= 4096
-
- # Position went above 4095 and got reset to 0
- elif track["prev"][idx] > values[i]:
- # So we add a full rotation
- values[i] += 4096
-
- track["prev"][idx] = values[i]
-
- return values
-
- def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY):
- if self.mock:
- import tests.mock_scservo_sdk as scs
- else:
- import scservo_sdk as scs
-
- return_list = True
- if not isinstance(motor_ids, list):
- return_list = False
- motor_ids = [motor_ids]
-
- assert_same_address(self.model_ctrl_table, self.motor_models, data_name)
- addr, bytes = self.model_ctrl_table[motor_models[0]][data_name]
- group = scs.GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes)
- for idx in motor_ids:
- group.addParam(idx)
-
- for _ in range(num_retry):
- comm = group.txRxPacket()
- if comm == scs.COMM_SUCCESS:
- break
-
- if comm != scs.COMM_SUCCESS:
- raise ConnectionError(
- f"Read failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: "
- f"{self.packet_handler.getTxRxResult(comm)}"
- )
-
- values = []
- for idx in motor_ids:
- value = group.getData(idx, addr, bytes)
- values.append(value)
-
- if return_list:
- return values
- else:
- return values[0]
-
- def read(self, data_name, motor_names: str | list[str] | None = None):
- if self.mock:
- import tests.mock_scservo_sdk as scs
- else:
- import scservo_sdk as scs
-
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"FeetechMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
- )
-
- start_time = time.perf_counter()
-
- if motor_names is None:
- motor_names = self.motor_names
-
- if isinstance(motor_names, str):
- motor_names = [motor_names]
-
- motor_ids = []
- models = []
- for name in motor_names:
- motor_idx, model = self.motors[name]
- motor_ids.append(motor_idx)
- models.append(model)
-
- assert_same_address(self.model_ctrl_table, models, data_name)
- addr, bytes = self.model_ctrl_table[model][data_name]
- group_key = get_group_sync_key(data_name, motor_names)
-
- if group_key not in self.group_readers:
- # create new group reader
- self.group_readers[group_key] = scs.GroupSyncRead(
- self.port_handler, self.packet_handler, addr, bytes
- )
- for idx in motor_ids:
- self.group_readers[group_key].addParam(idx)
-
- for _ in range(NUM_READ_RETRY):
- comm = self.group_readers[group_key].txRxPacket()
- if comm == scs.COMM_SUCCESS:
- break
-
- if comm != scs.COMM_SUCCESS:
- raise ConnectionError(
- f"Read failed due to communication error on port {self.port} for group_key {group_key}: "
- f"{self.packet_handler.getTxRxResult(comm)}"
- )
-
- values = []
- for idx in motor_ids:
- value = self.group_readers[group_key].getData(idx, addr, bytes)
- values.append(value)
-
- values = np.array(values)
-
- # Convert to signed int to use range [-2048, 2048] for our motor positions.
- if data_name in CONVERT_UINT32_TO_INT32_REQUIRED:
- values = values.astype(np.int32)
-
- if data_name in CALIBRATION_REQUIRED:
- values = self.avoid_rotation_reset(values, motor_names, data_name)
-
- if data_name in CALIBRATION_REQUIRED and self.calibration is not None:
- values = self.apply_calibration_autocorrect(values, motor_names)
-
- # log the number of seconds it took to read the data from the motors
- delta_ts_name = get_log_name("delta_timestamp_s", "read", data_name, motor_names)
- self.logs[delta_ts_name] = time.perf_counter() - start_time
-
- # log the utc time at which the data was received
- ts_utc_name = get_log_name("timestamp_utc", "read", data_name, motor_names)
- self.logs[ts_utc_name] = capture_timestamp_utc()
-
- return values
-
- def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY):
- if self.mock:
- import tests.mock_scservo_sdk as scs
- else:
- import scservo_sdk as scs
-
- if not isinstance(motor_ids, list):
- motor_ids = [motor_ids]
- if not isinstance(values, list):
- values = [values]
-
- assert_same_address(self.model_ctrl_table, motor_models, data_name)
- addr, bytes = self.model_ctrl_table[motor_models[0]][data_name]
- group = scs.GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes)
- for idx, value in zip(motor_ids, values, strict=True):
- data = convert_to_bytes(value, bytes, self.mock)
- group.addParam(idx, data)
-
- for _ in range(num_retry):
- comm = group.txPacket()
- if comm == scs.COMM_SUCCESS:
- break
-
- if comm != scs.COMM_SUCCESS:
- raise ConnectionError(
- f"Write failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: "
- f"{self.packet_handler.getTxRxResult(comm)}"
- )
-
- def write(self, data_name, values: int | float | np.ndarray, motor_names: str | list[str] | None = None):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"FeetechMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
- )
-
- start_time = time.perf_counter()
-
- if self.mock:
- import tests.mock_scservo_sdk as scs
- else:
- import scservo_sdk as scs
-
- if motor_names is None:
- motor_names = self.motor_names
-
- if isinstance(motor_names, str):
- motor_names = [motor_names]
-
- if isinstance(values, (int, float, np.integer)):
- values = [int(values)] * len(motor_names)
-
- values = np.array(values)
-
- motor_ids = []
- models = []
- for name in motor_names:
- motor_idx, model = self.motors[name]
- motor_ids.append(motor_idx)
- models.append(model)
-
- if data_name in CALIBRATION_REQUIRED and self.calibration is not None:
- values = self.revert_calibration(values, motor_names)
-
- values = values.tolist()
-
- assert_same_address(self.model_ctrl_table, models, data_name)
- addr, bytes = self.model_ctrl_table[model][data_name]
- group_key = get_group_sync_key(data_name, motor_names)
-
- init_group = group_key not in self.group_writers
- if init_group:
- self.group_writers[group_key] = scs.GroupSyncWrite(
- self.port_handler, self.packet_handler, addr, bytes
- )
-
- for idx, value in zip(motor_ids, values, strict=True):
- data = convert_to_bytes(value, bytes, self.mock)
- if init_group:
- self.group_writers[group_key].addParam(idx, data)
- else:
- self.group_writers[group_key].changeParam(idx, data)
-
- comm = self.group_writers[group_key].txPacket()
- if comm != scs.COMM_SUCCESS:
- raise ConnectionError(
- f"Write failed due to communication error on port {self.port} for group_key {group_key}: "
- f"{self.packet_handler.getTxRxResult(comm)}"
- )
-
- # log the number of seconds it took to write the data to the motors
- delta_ts_name = get_log_name("delta_timestamp_s", "write", data_name, motor_names)
- self.logs[delta_ts_name] = time.perf_counter() - start_time
-
- # TODO(rcadene): should we log the time before sending the write command?
- # log the utc time when the write has been completed
- ts_utc_name = get_log_name("timestamp_utc", "write", data_name, motor_names)
- self.logs[ts_utc_name] = capture_timestamp_utc()
-
- def disconnect(self):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- f"FeetechMotorsBus({self.port}) is not connected. Try running `motors_bus.connect()` first."
- )
-
- if self.port_handler is not None:
- self.port_handler.closePort()
- self.port_handler = None
-
- self.packet_handler = None
- self.group_readers = {}
- self.group_writers = {}
- self.is_connected = False
-
- def __del__(self):
- if getattr(self, "is_connected", False):
- self.disconnect()
diff --git a/lerobot/common/robot_devices/motors/utils.py b/lerobot/common/robot_devices/motors/utils.py
deleted file mode 100644
index 9ba314cea1..0000000000
--- a/lerobot/common/robot_devices/motors/utils.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import Protocol
-
-
-class MotorsBus(Protocol):
- def motor_names(self): ...
- def set_calibration(self): ...
- def apply_calibration(self): ...
- def revert_calibration(self): ...
- def read(self): ...
- def write(self): ...
diff --git a/lerobot/common/robot_devices/robots/dynamixel_calibration.py b/lerobot/common/robot_devices/robots/dynamixel_calibration.py
deleted file mode 100644
index 5c4932d2e7..0000000000
--- a/lerobot/common/robot_devices/robots/dynamixel_calibration.py
+++ /dev/null
@@ -1,130 +0,0 @@
-"""Logic to calibrate a robot arm built with dynamixel motors"""
-# TODO(rcadene, aliberts): move this logic into the robot code when refactoring
-
-import numpy as np
-
-from lerobot.common.robot_devices.motors.dynamixel import (
- CalibrationMode,
- TorqueMode,
- convert_degrees_to_steps,
-)
-from lerobot.common.robot_devices.motors.utils import MotorsBus
-
-URL_TEMPLATE = (
- "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp"
-)
-
-# The following positions are provided in nominal degree range ]-180, +180[
-# For more info on these constants, see comments in the code where they get used.
-ZERO_POSITION_DEGREE = 0
-ROTATED_POSITION_DEGREE = 90
-
-
-def assert_drive_mode(drive_mode):
- # `drive_mode` is in [0, 1], where 0 means the original rotation direction for the motor, and 1 means inverted.
- if not np.all(np.isin(drive_mode, [0, 1])):
- raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})")
-
-
-def apply_drive_mode(position, drive_mode):
- assert_drive_mode(drive_mode)
- # Convert `drive_mode` from [0, 1], where 0 indicates the original rotation direction and 1 inverted,
- # to [-1, 1], where 1 indicates the original rotation direction and -1 inverted.
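- # e.g. (illustrative): drive_mode = [0, 1] yields signed_drive_mode = [1, -1] below, so only the
- # second motor's position gets flipped.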
- signed_drive_mode = -(drive_mode * 2 - 1)
- position *= signed_drive_mode
- return position
-
-
-def compute_nearest_rounded_position(position, models):
- delta_turn = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, models)
- nearest_pos = np.round(position.astype(float) / delta_turn) * delta_turn
- return nearest_pos.astype(position.dtype)
-
-
-def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
- """This function ensures that a neural network trained on data collected on a given robot
- can work on another robot. For instance, before calibration, setting the same goal position
- for each motor of two different robots will result in two very different positions. But after calibration,
- the two robots will move to the same position. To this end, this function computes the homing offset
- and the drive mode for each motor of a given robot.
-
- Homing offset is used to shift the motor position to a ]-2048, +2048[ nominal range (when the motor uses 2048 steps
- to complete a half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions
- being 0. During the calibration process, you will need to manually move the robot to this "zero position".
-
- Drive mode is used to invert the rotation direction of the motor. This is useful when some motors have been assembled
- in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot
- to the "rotated position".
-
- After calibration, the homing offsets and drive modes are stored in a cache.
-
- Example of usage:
- ```python
- run_arm_calibration(arm, "koch", "left", "follower")
- ```
- """
- if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
- raise ValueError("To run calibration, the torque must be disabled on all motors.")
-
- print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")
-
- print("\nMove arm to zero position")
- print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero"))
- input("Press Enter to continue...")
-
- # We arbitrarily chose our zero target position to be a straight horizontal position with gripper upwards and closed.
- # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will
- # correspond to every motor angle being 0. If you set all 0 as Goal Position, the arm will move to this position.
- zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models)
-
- # Compute homing offset so that `present_position + homing_offset ~= target_position`.
- zero_pos = arm.read("Present_Position")
- zero_nearest_pos = compute_nearest_rounded_position(zero_pos, arm.motor_models)
- homing_offset = zero_target_pos - zero_nearest_pos
-
- # The rotated target position corresponds to a rotation of a quarter turn from the zero position.
- # This allows identifying the rotation direction of each motor.
- # For instance, if the motor rotates by 90 degrees, and its value is -90 after applying the homing offset, then we know its rotation direction
- # is inverted. However, for the calibration to be successful, every motor needs to follow the same target position.
- # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
- # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
- # of the previous motor in the kinematic chain.
- print("\nMove arm to rotated target position")
- print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
- input("Press Enter to continue...")
-
- rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models)
-
- # Find drive mode by rotating each motor by a quarter of a turn.
- # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0).
- rotated_pos = arm.read("Present_Position")
- drive_mode = (rotated_pos < zero_pos).astype(np.int32)
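- # e.g. (illustrative): a motor that read 2015 at the zero position and 991 at the rotated position moved
- # in the negative direction, so rotated_pos < zero_pos is True and its drive_mode is 1 (inverted).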
-
- # Re-compute homing offset to take into account drive mode
- rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode)
- rotated_nearest_pos = compute_nearest_rounded_position(rotated_drived_pos, arm.motor_models)
- homing_offset = rotated_target_pos - rotated_nearest_pos
-
- print("\nMove arm to rest position")
- print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rest"))
- input("Press Enter to continue...")
- print()
-
- # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
- calib_mode = [CalibrationMode.DEGREE.name] * len(arm.motor_names)
-
- # TODO(rcadene): make type of joints (DEGREE or LINEAR) configurable from yaml?
- if robot_type in ["aloha"] and "gripper" in arm.motor_names:
- # Joints with linear motions (like the gripper of Aloha) are expressed in a nominal range of [0, 100]
- calib_idx = arm.motor_names.index("gripper")
- calib_mode[calib_idx] = CalibrationMode.LINEAR.name
-
- calib_data = {
- "homing_offset": homing_offset.tolist(),
- "drive_mode": drive_mode.tolist(),
- "start_pos": zero_pos.tolist(),
- "end_pos": rotated_pos.tolist(),
- "calib_mode": calib_mode,
- "motor_names": arm.motor_names,
- }
- return calib_data
diff --git a/lerobot/common/robot_devices/robots/factory.py b/lerobot/common/robot_devices/robots/factory.py
deleted file mode 100644
index 17e8e5e6a9..0000000000
--- a/lerobot/common/robot_devices/robots/factory.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import hydra
-from omegaconf import DictConfig
-
-from lerobot.common.robot_devices.robots.utils import Robot
-
-
-def make_robot(cfg: DictConfig) -> Robot:
- robot = hydra.utils.instantiate(cfg)
- return robot
diff --git a/lerobot/common/robot_devices/robots/feetech_calibration.py b/lerobot/common/robot_devices/robots/feetech_calibration.py
deleted file mode 100644
index b015951a0c..0000000000
--- a/lerobot/common/robot_devices/robots/feetech_calibration.py
+++ /dev/null
@@ -1,484 +0,0 @@
-"""Logic to calibrate a robot arm built with feetech motors"""
-# TODO(rcadene, aliberts): move this logic into the robot code when refactoring
-
-import time
-
-import numpy as np
-
-from lerobot.common.robot_devices.motors.feetech import (
- CalibrationMode,
- TorqueMode,
- convert_degrees_to_steps,
-)
-from lerobot.common.robot_devices.motors.utils import MotorsBus
-
-URL_TEMPLATE = (
- "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp"
-)
-
-# The following positions are provided in nominal degree range ]-180, +180[
-# For more info on these constants, see comments in the code where they get used.
-ZERO_POSITION_DEGREE = 0
-ROTATED_POSITION_DEGREE = 90
-
-
-def assert_drive_mode(drive_mode):
- # `drive_mode` is in [0, 1], where 0 means the original rotation direction for the motor, and 1 means inverted.
- if not np.all(np.isin(drive_mode, [0, 1])):
- raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})")
-
-
-def apply_drive_mode(position, drive_mode):
- assert_drive_mode(drive_mode)
- # Convert `drive_mode` from [0, 1], where 0 indicates the original rotation direction and 1 inverted,
- # to [-1, 1], where 1 indicates the original rotation direction and -1 inverted.
- signed_drive_mode = -(drive_mode * 2 - 1)
- position *= signed_drive_mode
- return position
-
-
-def move_until_block(arm, motor_name, positive_direction=True, while_move_hook=None):
- count = 0
- while True:
- present_pos = arm.read("Present_Position", motor_name)
- if positive_direction:
- # Move +100 steps every time. Lower the steps to lower the speed at which the arm moves.
- arm.write("Goal_Position", present_pos + 100, motor_name)
- else:
- arm.write("Goal_Position", present_pos - 100, motor_name)
-
- if while_move_hook is not None:
- while_move_hook()
-
- present_pos = arm.read("Present_Position", motor_name).item()
- present_speed = arm.read("Present_Speed", motor_name).item()
- present_current = arm.read("Present_Current", motor_name).item()
- # present_load = arm.read("Present_Load", motor_name).item()
- # present_voltage = arm.read("Present_Voltage", motor_name).item()
- # present_temperature = arm.read("Present_Temperature", motor_name).item()
-
- # print(f"{present_pos=}")
- # print(f"{present_speed=}")
- # print(f"{present_current=}")
- # print(f"{present_load=}")
- # print(f"{present_voltage=}")
- # print(f"{present_temperature=}")
-
- if present_speed == 0 and present_current > 40:
- count += 1
- if count > 100 or present_current > 300:
- return present_pos
- else:
- count = 0
-
-
-def move_to_calibrate(
- arm,
- motor_name,
- invert_drive_mode=False,
- positive_first=True,
- in_between_move_hook=None,
- while_move_hook=None,
-):
- initial_pos = arm.read("Present_Position", motor_name)
-
- if positive_first:
- p_present_pos = move_until_block(
- arm, motor_name, positive_direction=True, while_move_hook=while_move_hook
- )
- else:
- n_present_pos = move_until_block(
- arm, motor_name, positive_direction=False, while_move_hook=while_move_hook
- )
-
- if in_between_move_hook is not None:
- in_between_move_hook()
-
- if positive_first:
- n_present_pos = move_until_block(
- arm, motor_name, positive_direction=False, while_move_hook=while_move_hook
- )
- else:
- p_present_pos = move_until_block(
- arm, motor_name, positive_direction=True, while_move_hook=while_move_hook
- )
-
- zero_pos = (n_present_pos + p_present_pos) / 2
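- # e.g. (illustrative): if the joint blocks at raw positions 900 and 3100, the mid-range "zero" is 2000.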
-
- calib_data = {
- "initial_pos": initial_pos,
- "homing_offset": zero_pos if invert_drive_mode else -zero_pos,
- "invert_drive_mode": invert_drive_mode,
- "drive_mode": -1 if invert_drive_mode else 0,
- "zero_pos": zero_pos,
- "start_pos": n_present_pos if invert_drive_mode else p_present_pos,
- "end_pos": p_present_pos if invert_drive_mode else n_present_pos,
- }
- return calib_data
-
-
-def apply_offset(calib, offset):
- calib["zero_pos"] += offset
- if calib["drive_mode"]:
- calib["homing_offset"] += offset
- else:
- calib["homing_offset"] -= offset
- return calib
-
-
-def run_arm_auto_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
- if robot_type == "so100":
- return run_arm_auto_calibration_so100(arm, robot_type, arm_name, arm_type)
- elif robot_type == "moss":
- return run_arm_auto_calibration_moss(arm, robot_type, arm_name, arm_type)
- else:
- raise ValueError(robot_type)
-
-
-def run_arm_auto_calibration_so100(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
- """All the offsets and magic numbers are hand tuned, and are unique to SO-100 follower arms"""
- if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
- raise ValueError("To run calibration, the torque must be disabled on all motors.")
-
- if not (robot_type == "so100" and arm_type == "follower"):
- raise NotImplementedError("Auto calibration only supports the follower of so100 arms for now.")
-
- print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")
-
- print("\nMove arm to initial position")
- print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="initial"))
- input("Press Enter to continue...")
-
- # Lower the acceleration of the motors (in [0,254])
- initial_acceleration = arm.read("Acceleration")
- arm.write("Lock", 0)
- arm.write("Acceleration", 10)
- time.sleep(1)
-
- arm.write("Torque_Enable", TorqueMode.ENABLED.value)
-
- print(f'{arm.read("Present_Position", "elbow_flex")=}')
-
- calib = {}
-
- init_wf_pos = arm.read("Present_Position", "wrist_flex")
- init_sl_pos = arm.read("Present_Position", "shoulder_lift")
- init_ef_pos = arm.read("Present_Position", "elbow_flex")
- arm.write("Goal_Position", init_wf_pos - 800, "wrist_flex")
- arm.write("Goal_Position", init_sl_pos + 150 + 1024, "shoulder_lift")
- arm.write("Goal_Position", init_ef_pos - 2048, "elbow_flex")
- time.sleep(2)
-
- print("Calibrate shoulder_pan")
- calib["shoulder_pan"] = move_to_calibrate(arm, "shoulder_pan")
- arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
- time.sleep(1)
-
- print("Calibrate gripper")
- calib["gripper"] = move_to_calibrate(arm, "gripper", invert_drive_mode=True)
- time.sleep(1)
-
- print("Calibrate wrist_flex")
- calib["wrist_flex"] = move_to_calibrate(arm, "wrist_flex")
- calib["wrist_flex"] = apply_offset(calib["wrist_flex"], offset=80)
-
- def in_between_move_hook():
- nonlocal arm, calib
- time.sleep(2)
- ef_pos = arm.read("Present_Position", "elbow_flex")
- sl_pos = arm.read("Present_Position", "shoulder_lift")
- arm.write("Goal_Position", ef_pos + 1024, "elbow_flex")
- arm.write("Goal_Position", sl_pos - 1024, "shoulder_lift")
- time.sleep(2)
-
- print("Calibrate elbow_flex")
- calib["elbow_flex"] = move_to_calibrate(
- arm, "elbow_flex", positive_first=False, in_between_move_hook=in_between_move_hook
- )
- calib["elbow_flex"] = apply_offset(calib["elbow_flex"], offset=80 - 1024)
-
- arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 1024 + 512, "elbow_flex")
- time.sleep(1)
-
- def in_between_move_hook():
- nonlocal arm, calib
- arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"], "elbow_flex")
-
- print("Calibrate shoulder_lift")
- calib["shoulder_lift"] = move_to_calibrate(
- arm,
- "shoulder_lift",
- invert_drive_mode=True,
- positive_first=False,
- in_between_move_hook=in_between_move_hook,
- )
- # add an offset to align with the body
- calib["shoulder_lift"] = apply_offset(calib["shoulder_lift"], offset=1024 - 50)
-
- def while_move_hook():
- nonlocal arm, calib
- positions = {
- "shoulder_lift": round(calib["shoulder_lift"]["zero_pos"] - 1600),
- "elbow_flex": round(calib["elbow_flex"]["zero_pos"] + 1700),
- "wrist_flex": round(calib["wrist_flex"]["zero_pos"] + 800),
- "gripper": round(calib["gripper"]["end_pos"]),
- }
- arm.write("Goal_Position", list(positions.values()), list(positions.keys()))
-
- arm.write("Goal_Position", round(calib["shoulder_lift"]["zero_pos"] - 1600), "shoulder_lift")
- time.sleep(2)
- arm.write("Goal_Position", round(calib["elbow_flex"]["zero_pos"] + 1700), "elbow_flex")
- time.sleep(2)
- arm.write("Goal_Position", round(calib["wrist_flex"]["zero_pos"] + 800), "wrist_flex")
- time.sleep(2)
- arm.write("Goal_Position", round(calib["gripper"]["end_pos"]), "gripper")
- time.sleep(2)
-
- print("Calibrate wrist_roll")
- calib["wrist_roll"] = move_to_calibrate(
- arm, "wrist_roll", invert_drive_mode=True, positive_first=False, while_move_hook=while_move_hook
- )
-
- arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"], "wrist_roll")
- time.sleep(1)
- arm.write("Goal_Position", calib["gripper"]["start_pos"], "gripper")
- time.sleep(1)
- arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"], "wrist_flex")
- time.sleep(1)
- arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 2048, "elbow_flex")
- arm.write("Goal_Position", calib["shoulder_lift"]["zero_pos"] - 2048, "shoulder_lift")
- time.sleep(1)
- arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
- time.sleep(1)
-
- calib_modes = []
- for name in arm.motor_names:
- if name == "gripper":
- calib_modes.append(CalibrationMode.LINEAR.name)
- else:
- calib_modes.append(CalibrationMode.DEGREE.name)
-
- calib_dict = {
- "homing_offset": [calib[name]["homing_offset"] for name in arm.motor_names],
- "drive_mode": [calib[name]["drive_mode"] for name in arm.motor_names],
- "start_pos": [calib[name]["start_pos"] for name in arm.motor_names],
- "end_pos": [calib[name]["end_pos"] for name in arm.motor_names],
- "calib_mode": calib_modes,
- "motor_names": arm.motor_names,
- }
-
- # Restore the original acceleration
- arm.write("Lock", 0)
- arm.write("Acceleration", initial_acceleration)
- time.sleep(1)
-
- return calib_dict
-
-
-def run_arm_auto_calibration_moss(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
- """All the offsets and magic numbers are hand tuned, and are unique to SO-100 follower arms"""
- if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
- raise ValueError("To run calibration, the torque must be disabled on all motors.")
-
- if not (robot_type == "moss" and arm_type == "follower"):
- raise NotImplementedError("Auto calibration only supports the follower of moss arms for now.")
-
- print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")
-
- print("\nMove arm to initial position")
- print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="initial"))
- input("Press Enter to continue...")
-
- # Lower the acceleration of the motors (in [0,254])
- initial_acceleration = arm.read("Acceleration")
- arm.write("Lock", 0)
- arm.write("Acceleration", 10)
- time.sleep(1)
-
- arm.write("Torque_Enable", TorqueMode.ENABLED.value)
-
- sl_pos = arm.read("Present_Position", "shoulder_lift")
- arm.write("Goal_Position", sl_pos - 1024 - 450, "shoulder_lift")
- ef_pos = arm.read("Present_Position", "elbow_flex")
- arm.write("Goal_Position", ef_pos + 1024 + 450, "elbow_flex")
- time.sleep(2)
-
- calib = {}
-
- print("Calibrate shoulder_pan")
- calib["shoulder_pan"] = move_to_calibrate(arm, "shoulder_pan")
- arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
- time.sleep(1)
-
- print("Calibrate gripper")
- calib["gripper"] = move_to_calibrate(arm, "gripper", invert_drive_mode=True)
- time.sleep(1)
-
- print("Calibrate wrist_flex")
- calib["wrist_flex"] = move_to_calibrate(arm, "wrist_flex", invert_drive_mode=True)
- calib["wrist_flex"] = apply_offset(calib["wrist_flex"], offset=-210 + 1024)
-
- wr_pos = arm.read("Present_Position", "wrist_roll")
- arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
- time.sleep(1)
- arm.write("Goal_Position", wr_pos - 1024, "wrist_roll")
- time.sleep(1)
- arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 2048, "wrist_flex")
- time.sleep(1)
- arm.write("Goal_Position", calib["gripper"]["end_pos"], "gripper")
- time.sleep(1)
-
- print("Calibrate wrist_roll")
- calib["wrist_roll"] = move_to_calibrate(arm, "wrist_roll", invert_drive_mode=True)
- calib["wrist_roll"] = apply_offset(calib["wrist_roll"], offset=790)
-
- arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"] - 1024, "wrist_roll")
- arm.write("Goal_Position", calib["gripper"]["start_pos"], "gripper")
- arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
- time.sleep(1)
- arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"], "wrist_roll")
- arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 2048, "wrist_flex")
-
- def in_between_move_elbow_flex_hook():
- nonlocal arm, calib
- arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"], "wrist_flex")
-
- print("Calibrate elbow_flex")
- calib["elbow_flex"] = move_to_calibrate(
- arm,
- "elbow_flex",
- invert_drive_mode=True,
- in_between_move_hook=in_between_move_elbow_flex_hook,
- )
- arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
-
- def in_between_move_shoulder_lift_hook():
- nonlocal arm, calib
- sl = arm.read("Present_Position", "shoulder_lift")
- arm.write("Goal_Position", sl - 1500, "shoulder_lift")
- time.sleep(1)
- arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 1536, "elbow_flex")
- time.sleep(1)
- arm.write("Goal_Position", calib["wrist_flex"]["start_pos"], "wrist_flex")
- time.sleep(1)
-
- print("Calibrate shoulder_lift")
- calib["shoulder_lift"] = move_to_calibrate(
- arm, "shoulder_lift", in_between_move_hook=in_between_move_shoulder_lift_hook
- )
- calib["shoulder_lift"] = apply_offset(calib["shoulder_lift"], offset=-1024)
-
- arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
- time.sleep(1)
- arm.write("Goal_Position", calib["shoulder_lift"]["zero_pos"] + 2048, "shoulder_lift")
- arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] - 1024 - 400, "elbow_flex")
- time.sleep(2)
-
- calib_modes = []
- for name in arm.motor_names:
- if name == "gripper":
- calib_modes.append(CalibrationMode.LINEAR.name)
- else:
- calib_modes.append(CalibrationMode.DEGREE.name)
-
- calib_dict = {
- "homing_offset": [calib[name]["homing_offset"] for name in arm.motor_names],
- "drive_mode": [calib[name]["drive_mode"] for name in arm.motor_names],
- "start_pos": [calib[name]["start_pos"] for name in arm.motor_names],
- "end_pos": [calib[name]["end_pos"] for name in arm.motor_names],
- "calib_mode": calib_modes,
- "motor_names": arm.motor_names,
- }
-
- # Re-enable original acceleration
- arm.write("Lock", 0)
- arm.write("Acceleration", initial_acceleration)
- time.sleep(1)
-
- return calib_dict
-
-
-def run_arm_manual_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
- """This function ensures that a neural network trained on data collected on a given robot
- can work on another robot. For instance before calibration, setting a same goal position
- for each motor of two different robots will get two very different positions. But after calibration,
- the two robots will move to the same position.To this end, this function computes the homing offset
- and the drive mode for each motor of a given robot.
-
- Homing offset is used to shift the motor position into a ]-2048, +2048[ nominal range (when the motor uses 2048 steps
- to complete half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions
- being 0. During the calibration process, you will need to manually move the robot to this "zero position".
-
- Drive mode is used to invert the rotation direction of the motor. This is useful when some motors have been assembled
- in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot
- to the "rotated position".
-
- After calibration, the homing offsets and drive modes are stored in a cache.
-
- Example of usage:
- ```python
- run_arm_calibration(arm, "so100", "left", "follower")
- ```
- """
- if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
- raise ValueError("To run calibration, the torque must be disabled on all motors.")
-
- print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")
-
- print("\nMove arm to zero position")
- print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero"))
- input("Press Enter to continue...")
-
- # We arbitrarily chose our zero target position to be a straight horizontal position with gripper upwards and closed.
- # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will
- # correspond to every motor angle being 0. If you set all Goal Positions to 0, the arm will move to this position.
- zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models)
-
- # Compute homing offset so that `present_position + homing_offset ~= target_position`.
- zero_pos = arm.read("Present_Position")
- homing_offset = zero_target_pos - zero_pos
-
- # The rotated target position corresponds to a rotation of a quarter turn from the zero position.
- # This allows us to identify the rotation direction of each motor.
- # For instance, if the motor rotates 90 degrees, and its value is -90 after applying the homing offset, then we know its rotation
- # direction is inverted. However, for the calibration to be successful, every motor needs to follow the same target position.
- # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
- # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
- # of the previous motor in the kinematic chain.
- print("\nMove arm to rotated target position")
- print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
- input("Press Enter to continue...")
-
- rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models)
-
- # Find drive mode by rotating each motor by a quarter of a turn.
- # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0).
- rotated_pos = arm.read("Present_Position")
- drive_mode = (rotated_pos < zero_pos).astype(np.int32)
-
- # Re-compute homing offset to take into account drive mode
- rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode)
- homing_offset = rotated_target_pos - rotated_drived_pos
-
- print("\nMove arm to rest position")
- print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rest"))
- input("Press Enter to continue...")
- print()
-
- # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
- calib_modes = []
- for name in arm.motor_names:
- if name == "gripper":
- calib_modes.append(CalibrationMode.LINEAR.name)
- else:
- calib_modes.append(CalibrationMode.DEGREE.name)
-
- calib_dict = {
- "homing_offset": homing_offset.tolist(),
- "drive_mode": drive_mode.tolist(),
- "start_pos": zero_pos.tolist(),
- "end_pos": rotated_pos.tolist(),
- "calib_mode": calib_modes,
- "motor_names": arm.motor_names,
- }
- return calib_dict
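As a minimal numeric sketch of the homing-offset and drive-mode computation above: all step values are hypothetical, and `apply_drive_mode` is assumed to negate the readings of motors whose drive mode is 1.

```python
import numpy as np

# Hypothetical raw readings (in steps, 4096 steps per turn) for a 3-motor arm.
zero_target_pos = np.array([0, 0, 0])              # target steps at the "zero position"
zero_pos = np.array([2048, 3072, 1024])            # raw readings at the zero position
homing_offset = zero_target_pos - zero_pos         # so that present_position + homing_offset ~= target_position

rotated_target_pos = np.array([1024, 1024, 1024])  # a quarter turn is 1024 steps
rotated_pos = np.array([3072, 2048, 2048])         # raw readings at the rotated position
drive_mode = (rotated_pos < zero_pos).astype(np.int32)  # -> [0, 1, 0]: the second motor is inverted

# Assumption: apply_drive_mode negates the position of motors whose drive mode is 1.
rotated_drived_pos = np.where(drive_mode == 1, -rotated_pos, rotated_pos)
homing_offset = rotated_target_pos - rotated_drived_pos
```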
diff --git a/lerobot/common/robot_devices/robots/manipulator.py b/lerobot/common/robot_devices/robots/manipulator.py
deleted file mode 100644
index 618105064a..0000000000
--- a/lerobot/common/robot_devices/robots/manipulator.py
+++ /dev/null
@@ -1,686 +0,0 @@
-"""Contains logic to instantiate a robot, read information from its motors and cameras,
-and send orders to its motors.
-"""
-# TODO(rcadene, aliberts): reorganize the codebase into one file per robot, with the associated
-# calibration procedure, to make it easy for people to add their own robot.
-
-import json
-import logging
-import time
-import warnings
-from dataclasses import dataclass, field, replace
-from pathlib import Path
-from typing import Sequence
-
-import numpy as np
-import torch
-
-from lerobot.common.robot_devices.cameras.utils import Camera
-from lerobot.common.robot_devices.motors.utils import MotorsBus
-from lerobot.common.robot_devices.robots.utils import get_arm_id
-from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
-
-
-def ensure_safe_goal_position(
- goal_pos: torch.Tensor, present_pos: torch.Tensor, max_relative_target: float | list[float]
-):
- # Cap relative action target magnitude for safety.
- diff = goal_pos - present_pos
- max_relative_target = torch.tensor(max_relative_target)
- safe_diff = torch.minimum(diff, max_relative_target)
- safe_diff = torch.maximum(safe_diff, -max_relative_target)
- safe_goal_pos = present_pos + safe_diff
-
- if not torch.allclose(goal_pos, safe_goal_pos):
- logging.warning(
- "Relative goal position magnitude had to be clamped to be safe.\n"
- f" requested relative goal position target: {diff}\n"
- f" clamped relative goal position target: {safe_diff}"
- )
-
- return safe_goal_pos
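For instance, with hypothetical positions and `max_relative_target=10`, a goal 30 steps away is capped to a 10-step move while a 5-step move passes through unchanged (assuming `ensure_safe_goal_position` is in scope):

```python
import torch

present = torch.tensor([100.0, 200.0])
goal = torch.tensor([130.0, 195.0])
safe = ensure_safe_goal_position(goal, present, max_relative_target=10.0)
# safe == tensor([110., 195.]); a warning is logged because the first goal was clamped.
```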
-
-
-@dataclass
-class ManipulatorRobotConfig:
- """
- Example of usage:
- ```python
- ManipulatorRobotConfig()
- ```
- """
-
- # Define all components of the robot
- robot_type: str = "koch"
- leader_arms: dict[str, MotorsBus] = field(default_factory=lambda: {})
- follower_arms: dict[str, MotorsBus] = field(default_factory=lambda: {})
- cameras: dict[str, Camera] = field(default_factory=lambda: {})
-
- # Optionally limit the magnitude of the relative positional target vector for safety purposes.
- # Set this to a positive scalar to have the same value for all motors, or a list that is the same length
- # as the number of motors in your follower arms (assumes all follower arms have the same number of
- # motors).
- max_relative_target: list[float] | float | None = None
-
- # Optionally set the leader arm in torque mode with the gripper motor set to this angle. This makes it
- # possible to squeeze the gripper and have it spring back to an open position on its own. If None, the
- # gripper is not put in torque mode.
- gripper_open_degree: float | None = None
-
- def __setattr__(self, prop: str, val):
- if prop == "max_relative_target" and val is not None and isinstance(val, Sequence):
- for name in self.follower_arms:
- if len(self.follower_arms[name].motors) != len(val):
- raise ValueError(
- f"len(max_relative_target)={len(val)} but the follower arm with name {name} has "
- f"{len(self.follower_arms[name].motors)} motors. Please make sure that the "
- f"`max_relative_target` list has as many parameters as there are motors per arm. "
- "Note: This feature does not yet work with robots where different follower arms have "
- "different numbers of motors."
- )
- super().__setattr__(prop, val)
-
- def __post_init__(self):
- if self.robot_type not in ["koch", "koch_bimanual", "aloha", "so100", "moss"]:
- raise ValueError(f"Provided robot type ({self.robot_type}) is not supported.")
-
-
-class ManipulatorRobot:
- # TODO(rcadene): Implement force feedback
- """This class allows to control any manipulator robot of various number of motors.
-
- Non exaustive list of robots:
- - [Koch v1.0](https://github.com/AlexanderKoch-Koch/low_cost_robot), with and without the wrist-to-elbow expansion, developed
- by Alexander Koch from [Tau Robotics](https://tau-robotics.com)
- - [Koch v1.1](https://github.com/jess-moss/koch-v1-1) developed by Jess Moss
- - [Aloha](https://www.trossenrobotics.com/aloha-kits) developed by Trossen Robotics
-
- Example of highest frequency teleoperation without camera:
- ```python
- # Defines how to communicate with the motors of the leader and follower arms
- leader_arms = {
- "main": DynamixelMotorsBus(
- port="/dev/tty.usbmodem575E0031751",
- motors={
- # name: (index, model)
- "shoulder_pan": (1, "xl330-m077"),
- "shoulder_lift": (2, "xl330-m077"),
- "elbow_flex": (3, "xl330-m077"),
- "wrist_flex": (4, "xl330-m077"),
- "wrist_roll": (5, "xl330-m077"),
- "gripper": (6, "xl330-m077"),
- },
- ),
- }
- follower_arms = {
- "main": DynamixelMotorsBus(
- port="/dev/tty.usbmodem575E0032081",
- motors={
- # name: (index, model)
- "shoulder_pan": (1, "xl430-w250"),
- "shoulder_lift": (2, "xl430-w250"),
- "elbow_flex": (3, "xl330-m288"),
- "wrist_flex": (4, "xl330-m288"),
- "wrist_roll": (5, "xl330-m288"),
- "gripper": (6, "xl330-m288"),
- },
- ),
- }
- robot = ManipulatorRobot(
- robot_type="koch",
- calibration_dir=".cache/calibration/koch",
- leader_arms=leader_arms,
- follower_arms=follower_arms,
- )
-
- # Connect motors buses and cameras if any (Required)
- robot.connect()
-
- while True:
- robot.teleop_step()
- ```
-
- Example of highest frequency data collection without camera:
- ```python
- # Assumes leader and follower arms have been instantiated already (see first example)
- robot = ManipulatorRobot(
- robot_type="koch",
- calibration_dir=".cache/calibration/koch",
- leader_arms=leader_arms,
- follower_arms=follower_arms,
- )
- robot.connect()
- while True:
- observation, action = robot.teleop_step(record_data=True)
- ```
-
- Example of highest frequency data collection with cameras:
- ```python
- # Defines how to communicate with 2 cameras connected to the computer.
- # Here, the webcam of the laptop and the phone (connected in USB to the laptop)
- # can be reached respectively using the camera indices 0 and 1. These indices can be
- # arbitrary. See the documentation of `OpenCVCamera` to find your own camera indices.
- cameras = {
- "laptop": OpenCVCamera(camera_index=0, fps=30, width=640, height=480),
- "phone": OpenCVCamera(camera_index=1, fps=30, width=640, height=480),
- }
-
- # Assumes leader and follower arms have been instantiated already (see first example)
- robot = ManipulatorRobot(
- robot_type="koch",
- calibration_dir=".cache/calibration/koch",
- leader_arms=leader_arms,
- follower_arms=follower_arms,
- cameras=cameras,
- )
- robot.connect()
- while True:
- observation, action = robot.teleop_step(record_data=True)
- ```
-
- Example of controlling the robot with a policy (without running multiple policies in parallel to ensure highest frequency):
- ```python
- # Assumes leader and follower arms + cameras have been instantiated already (see previous example)
- robot = ManipulatorRobot(
- robot_type="koch",
- calibration_dir=".cache/calibration/koch",
- leader_arms=leader_arms,
- follower_arms=follower_arms,
- cameras=cameras,
- )
- robot.connect()
- while True:
- # Uses the follower arms and cameras to capture an observation
- observation = robot.capture_observation()
-
- # Assumes a policy has been instantiated
- with torch.inference_mode():
- action = policy.select_action(observation)
-
- # Orders the robot to move
- robot.send_action(action)
- ```
-
- Example of disconnecting, which is not mandatory since we disconnect when the object is deleted:
- ```python
- robot.disconnect()
- ```
- """
-
- def __init__(
- self,
- config: ManipulatorRobotConfig | None = None,
- calibration_dir: Path = ".cache/calibration/koch",
- **kwargs,
- ):
- if config is None:
- config = ManipulatorRobotConfig()
- # Overwrite config arguments using kwargs
- self.config = replace(config, **kwargs)
- self.calibration_dir = Path(calibration_dir)
-
- self.robot_type = self.config.robot_type
- self.leader_arms = self.config.leader_arms
- self.follower_arms = self.config.follower_arms
- self.cameras = self.config.cameras
- self.is_connected = False
- self.logs = {}
-
- def get_motor_names(self, arms: dict[str, MotorsBus]) -> list:
- return [f"{arm}_{motor}" for arm, bus in arms.items() for motor in bus.motors]
-
- @property
- def camera_features(self) -> dict:
- cam_ft = {}
- for cam_key, cam in self.cameras.items():
- key = f"observation.images.{cam_key}"
- cam_ft[key] = {
- "shape": (cam.height, cam.width, cam.channels),
- "names": ["height", "width", "channels"],
- "info": None,
- }
- return cam_ft
-
- @property
- def motor_features(self) -> dict:
- action_names = self.get_motor_names(self.leader_arms)
- state_names = self.get_motor_names(self.leader_arms)
- return {
- "action": {
- "dtype": "float32",
- "shape": (len(action_names),),
- "names": action_names,
- },
- "observation.state": {
- "dtype": "float32",
- "shape": (len(state_names),),
- "names": state_names,
- },
- }
-
- @property
- def features(self):
- return {**self.motor_features, **self.camera_features}
-
- @property
- def has_camera(self):
- return len(self.cameras) > 0
-
- @property
- def num_cameras(self):
- return len(self.cameras)
-
- @property
- def available_arms(self):
- available_arms = []
- for name in self.follower_arms:
- arm_id = get_arm_id(name, "follower")
- available_arms.append(arm_id)
- for name in self.leader_arms:
- arm_id = get_arm_id(name, "leader")
- available_arms.append(arm_id)
- return available_arms
-
- def connect(self):
- if self.is_connected:
- raise RobotDeviceAlreadyConnectedError(
- "ManipulatorRobot is already connected. Do not run `robot.connect()` twice."
- )
-
- if not self.leader_arms and not self.follower_arms and not self.cameras:
- raise ValueError(
- "ManipulatorRobot doesn't have any device to connect. See example of usage in docstring of the class."
- )
-
- # Connect the arms
- for name in self.follower_arms:
- print(f"Connecting {name} follower arm.")
- self.follower_arms[name].connect()
- for name in self.leader_arms:
- print(f"Connecting {name} leader arm.")
- self.leader_arms[name].connect()
-
- if self.robot_type in ["koch", "koch_bimanual", "aloha"]:
- from lerobot.common.robot_devices.motors.dynamixel import TorqueMode
- elif self.robot_type in ["so100", "moss"]:
- from lerobot.common.robot_devices.motors.feetech import TorqueMode
-
- # We assume that at connection time, arms are in a rest position, and torque can
- # be safely disabled to run calibration and/or set robot preset configurations.
- for name in self.follower_arms:
- self.follower_arms[name].write("Torque_Enable", TorqueMode.DISABLED.value)
- for name in self.leader_arms:
- self.leader_arms[name].write("Torque_Enable", TorqueMode.DISABLED.value)
-
- self.activate_calibration()
-
- # Set robot preset (e.g. torque in leader gripper for Koch v1.1)
- if self.robot_type in ["koch", "koch_bimanual"]:
- self.set_koch_robot_preset()
- elif self.robot_type == "aloha":
- self.set_aloha_robot_preset()
- elif self.robot_type in ["so100", "moss"]:
- self.set_so100_robot_preset()
-
- # Enable torque on all motors of the follower arms
- for name in self.follower_arms:
- print(f"Activating torque on {name} follower arm.")
- self.follower_arms[name].write("Torque_Enable", 1)
-
- if self.config.gripper_open_degree is not None:
- if self.robot_type not in ["koch", "koch_bimanual"]:
- raise NotImplementedError(
- f"{self.robot_type} does not support position AND current control in the handle, which is require to set the gripper open."
- )
- # Set the leader arm in torque mode with the gripper motor set to an angle. This makes it possible
- # to squeeze the gripper and have it spring back to an open position on its own.
- for name in self.leader_arms:
- self.leader_arms[name].write("Torque_Enable", 1, "gripper")
- self.leader_arms[name].write("Goal_Position", self.config.gripper_open_degree, "gripper")
-
- # Check both arms can be read
- for name in self.follower_arms:
- self.follower_arms[name].read("Present_Position")
- for name in self.leader_arms:
- self.leader_arms[name].read("Present_Position")
-
- # Connect the cameras
- for name in self.cameras:
- self.cameras[name].connect()
-
- self.is_connected = True
-
- def activate_calibration(self):
- """After calibration all motors function in human interpretable ranges.
- Rotations are expressed in degrees in nominal range of [-180, 180],
- and linear motions (like gripper of Aloha) in nominal range of [0, 100].
- """
-
- def load_or_run_calibration_(name, arm, arm_type):
- arm_id = get_arm_id(name, arm_type)
- arm_calib_path = self.calibration_dir / f"{arm_id}.json"
-
- if arm_calib_path.exists():
- with open(arm_calib_path) as f:
- calibration = json.load(f)
- else:
- # TODO(rcadene): display a warning in __init__ if calibration file not available
- print(f"Missing calibration file '{arm_calib_path}'")
-
- if self.robot_type in ["koch", "koch_bimanual", "aloha"]:
- from lerobot.common.robot_devices.robots.dynamixel_calibration import run_arm_calibration
-
- calibration = run_arm_calibration(arm, self.robot_type, name, arm_type)
-
- elif self.robot_type in ["so100", "moss"]:
- from lerobot.common.robot_devices.robots.feetech_calibration import (
- run_arm_manual_calibration,
- )
-
- calibration = run_arm_manual_calibration(arm, self.robot_type, name, arm_type)
-
- print(f"Calibration is done! Saving calibration file '{arm_calib_path}'")
- arm_calib_path.parent.mkdir(parents=True, exist_ok=True)
- with open(arm_calib_path, "w") as f:
- json.dump(calibration, f)
-
- return calibration
-
- for name, arm in self.follower_arms.items():
- calibration = load_or_run_calibration_(name, arm, "follower")
- arm.set_calibration(calibration)
- for name, arm in self.leader_arms.items():
- calibration = load_or_run_calibration_(name, arm, "leader")
- arm.set_calibration(calibration)
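As a sketch of where the resulting calibration ends up, assuming a Koch follower arm named "main" and the default `calibration_dir`; deleting this file forces the calibration procedure to run again on the next `connect()`:

```python
from pathlib import Path

from lerobot.common.robot_devices.robots.utils import get_arm_id

arm_id = get_arm_id("main", "follower")                          # -> "main_follower"
calib_path = Path(".cache/calibration/koch") / f"{arm_id}.json"  # -> .cache/calibration/koch/main_follower.json
```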
-
- def set_koch_robot_preset(self):
- def set_operating_mode_(arm):
- from lerobot.common.robot_devices.motors.dynamixel import TorqueMode
-
- if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
- raise ValueError("To run set robot preset, the torque must be disabled on all motors.")
-
- # Use 'extended position mode' for all motors except gripper, because in joint mode the servos can't
- # rotate more than 360 degrees (from 0 to 4095). Also, some mistakes can happen while assembling the arm,
- # and you could end up with a servo at position 0 or 4095 at a crucial point. See
- # https://emanual.robotis.com/docs/en/dxl/x/x_series/#operating-mode11
- all_motors_except_gripper = [name for name in arm.motor_names if name != "gripper"]
- if len(all_motors_except_gripper) > 0:
- # 4 corresponds to Extended Position on Koch motors
- arm.write("Operating_Mode", 4, all_motors_except_gripper)
-
- # Use 'position control current based' for the gripper so that it is limited by the current limit.
- # For the follower gripper, it means it can grasp an object without forcing too much even though
- # its goal position is a complete grasp (both gripper fingers are ordered to join and touch).
- # For the leader gripper, it means we can use it as a physical trigger, since we can push it with a finger
- # to make it move, and it will move back to its original target position when we release the force.
- # 5 corresponds to Current Controlled Position on Koch gripper motors "xl330-m077, xl330-m288"
- arm.write("Operating_Mode", 5, "gripper")
-
- for name in self.follower_arms:
- set_operating_mode_(self.follower_arms[name])
-
- # Set better PID values to close the gap between recorded states and actions
- # TODO(rcadene): Implement an automatic procedure to set optimal PID values for each motor
- self.follower_arms[name].write("Position_P_Gain", 1500, "elbow_flex")
- self.follower_arms[name].write("Position_I_Gain", 0, "elbow_flex")
- self.follower_arms[name].write("Position_D_Gain", 600, "elbow_flex")
-
- if self.config.gripper_open_degree is not None:
- for name in self.leader_arms:
- set_operating_mode_(self.leader_arms[name])
-
- # Enable torque on the gripper of the leader arms, and move it to 45 degrees,
- # so that we can use it as a trigger to close the gripper of the follower arms.
- self.leader_arms[name].write("Torque_Enable", 1, "gripper")
- self.leader_arms[name].write("Goal_Position", self.config.gripper_open_degree, "gripper")
-
- def set_aloha_robot_preset(self):
- def set_shadow_(arm):
- # Set secondary/shadow ID for shoulder and elbow. These joints have two motors.
- # As a result, if only one of them is required to move to a certain position,
- # the other will follow. This is to avoid breaking the motors.
- if "shoulder_shadow" in arm.motor_names:
- shoulder_idx = arm.read("ID", "shoulder")
- arm.write("Secondary_ID", shoulder_idx, "shoulder_shadow")
-
- if "elbow_shadow" in arm.motor_names:
- elbow_idx = arm.read("ID", "elbow")
- arm.write("Secondary_ID", elbow_idx, "elbow_shadow")
-
- for name in self.follower_arms:
- set_shadow_(self.follower_arms[name])
-
- for name in self.leader_arms:
- set_shadow_(self.leader_arms[name])
-
- for name in self.follower_arms:
- # Set a velocity limit of 131 as advised by Trossen Robotics
- self.follower_arms[name].write("Velocity_Limit", 131)
-
- # Use 'extended position mode' for all motors except gripper, because in joint mode the servos can't
- # rotate more than 360 degrees (from 0 to 4095). Also, some mistakes can happen while assembling the arm,
- # and you could end up with a servo at position 0 or 4095 at a crucial point. See
- # https://emanual.robotis.com/docs/en/dxl/x/x_series/#operating-mode11
- all_motors_except_gripper = [
- name for name in self.follower_arms[name].motor_names if name != "gripper"
- ]
- if len(all_motors_except_gripper) > 0:
- # 4 corresponds to Extended Position on Aloha motors
- self.follower_arms[name].write("Operating_Mode", 4, all_motors_except_gripper)
-
- # Use 'position control current based' for the follower gripper so that it is limited by the current limit.
- # It can grasp an object without forcing too much even though
- # its goal position is a complete grasp (both gripper fingers are ordered to join and touch).
- # 5 corresponds to Current Controlled Position on Aloha gripper follower "xm430-w350"
- self.follower_arms[name].write("Operating_Mode", 5, "gripper")
-
- # Note: We can't enable torque on the leader gripper since "xc430-w150" doesn't have
- # a Current Controlled Position mode.
-
- if self.config.gripper_open_degree is not None:
- warnings.warn(
- f"`gripper_open_degree` is set to {self.config.gripper_open_degree}, but None is expected for Aloha instead",
- stacklevel=1,
- )
-
- def set_so100_robot_preset(self):
- for name in self.follower_arms:
- # Mode=0 for Position Control
- self.follower_arms[name].write("Mode", 0)
- # Set P_Coefficient to lower value to avoid shakiness (Default is 32)
- self.follower_arms[name].write("P_Coefficient", 16)
- # Set I_Coefficient and D_Coefficient to default value 0 and 32
- self.follower_arms[name].write("I_Coefficient", 0)
- self.follower_arms[name].write("D_Coefficient", 32)
- # Close the write lock so that Maximum_Acceleration gets written to EPROM address,
- # which is mandatory for Maximum_Acceleration to take effect after rebooting.
- self.follower_arms[name].write("Lock", 0)
- # Set Maximum_Acceleration to 254 to speedup acceleration and deceleration of
- # the motors. Note: this configuration is not in the official STS3215 Memory Table
- self.follower_arms[name].write("Maximum_Acceleration", 254)
- self.follower_arms[name].write("Acceleration", 254)
-
- def teleop_step(
- self, record_data=False
- ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- "ManipulatorRobot is not connected. You need to run `robot.connect()`."
- )
-
- # Prepare to assign the position of the leader to the follower
- leader_pos = {}
- for name in self.leader_arms:
- before_lread_t = time.perf_counter()
- leader_pos[name] = self.leader_arms[name].read("Present_Position")
- leader_pos[name] = torch.from_numpy(leader_pos[name])
- self.logs[f"read_leader_{name}_pos_dt_s"] = time.perf_counter() - before_lread_t
-
- # Send goal position to the follower
- follower_goal_pos = {}
- for name in self.follower_arms:
- before_fwrite_t = time.perf_counter()
- goal_pos = leader_pos[name]
-
- # Cap goal position when too far away from present position.
- # Slower fps expected due to reading from the follower.
- if self.config.max_relative_target is not None:
- present_pos = self.follower_arms[name].read("Present_Position")
- present_pos = torch.from_numpy(present_pos)
- goal_pos = ensure_safe_goal_position(goal_pos, present_pos, self.config.max_relative_target)
-
- # Used when record_data=True
- follower_goal_pos[name] = goal_pos
-
- goal_pos = goal_pos.numpy().astype(np.int32)
- self.follower_arms[name].write("Goal_Position", goal_pos)
- self.logs[f"write_follower_{name}_goal_pos_dt_s"] = time.perf_counter() - before_fwrite_t
-
- # Early exit when recording data is not requested
- if not record_data:
- return
-
- # TODO(rcadene): Add velocity and other info
- # Read follower position
- follower_pos = {}
- for name in self.follower_arms:
- before_fread_t = time.perf_counter()
- follower_pos[name] = self.follower_arms[name].read("Present_Position")
- follower_pos[name] = torch.from_numpy(follower_pos[name])
- self.logs[f"read_follower_{name}_pos_dt_s"] = time.perf_counter() - before_fread_t
-
- # Create state by concatenating follower current position
- state = []
- for name in self.follower_arms:
- if name in follower_pos:
- state.append(follower_pos[name])
- state = torch.cat(state)
-
- # Create action by concatenating follower goal position
- action = []
- for name in self.follower_arms:
- if name in follower_goal_pos:
- action.append(follower_goal_pos[name])
- action = torch.cat(action)
-
- # Capture images from cameras
- images = {}
- for name in self.cameras:
- before_camread_t = time.perf_counter()
- images[name] = self.cameras[name].async_read()
- images[name] = torch.from_numpy(images[name])
- self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
- self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t
-
- # Populate output dictionaries
- obs_dict, action_dict = {}, {}
- obs_dict["observation.state"] = state
- action_dict["action"] = action
- for name in self.cameras:
- obs_dict[f"observation.images.{name}"] = images[name]
-
- return obs_dict, action_dict
-
- def capture_observation(self):
- """The returned observations do not have a batch dimension."""
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- "ManipulatorRobot is not connected. You need to run `robot.connect()`."
- )
-
- # Read follower position
- follower_pos = {}
- for name in self.follower_arms:
- before_fread_t = time.perf_counter()
- follower_pos[name] = self.follower_arms[name].read("Present_Position")
- follower_pos[name] = torch.from_numpy(follower_pos[name])
- self.logs[f"read_follower_{name}_pos_dt_s"] = time.perf_counter() - before_fread_t
-
- # Create state by concatenating follower current position
- state = []
- for name in self.follower_arms:
- if name in follower_pos:
- state.append(follower_pos[name])
- state = torch.cat(state)
-
- # Capture images from cameras
- images = {}
- for name in self.cameras:
- before_camread_t = time.perf_counter()
- images[name] = self.cameras[name].async_read()
- images[name] = torch.from_numpy(images[name])
- self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
- self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t
-
- # Populate output dictionaries and format to pytorch
- obs_dict = {}
- obs_dict["observation.state"] = state
- for name in self.cameras:
- obs_dict[f"observation.images.{name}"] = images[name]
- return obs_dict
-
- def send_action(self, action: torch.Tensor) -> torch.Tensor:
- """Command the follower arms to move to a target joint configuration.
-
- The relative action magnitude may be clipped depending on the configuration parameter
- `max_relative_target`. In this case, the action sent differs from the original action.
- Thus, this function always returns the action actually sent.
-
- Args:
- action: tensor containing the concatenated goal positions for the follower arms.
- """
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- "ManipulatorRobot is not connected. You need to run `robot.connect()`."
- )
-
- from_idx = 0
- to_idx = 0
- action_sent = []
- for name in self.follower_arms:
- # Get goal position of each follower arm by splitting the action vector
- to_idx += len(self.follower_arms[name].motor_names)
- goal_pos = action[from_idx:to_idx]
- from_idx = to_idx
-
- # Cap goal position when too far away from present position.
- # Slower fps expected due to reading from the follower.
- if self.config.max_relative_target is not None:
- present_pos = self.follower_arms[name].read("Present_Position")
- present_pos = torch.from_numpy(present_pos)
- goal_pos = ensure_safe_goal_position(goal_pos, present_pos, self.config.max_relative_target)
-
- # Save tensor to concat and return
- action_sent.append(goal_pos)
-
- # Send goal position to each follower
- goal_pos = goal_pos.numpy().astype(np.int32)
- self.follower_arms[name].write("Goal_Position", goal_pos)
-
- return torch.cat(action_sent)
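For example, with two 6-motor follower arms the action vector is split in declaration order, and the returned tensor reflects any clamping applied above (the tensors are hypothetical, and `robot` is assumed to be a connected ManipulatorRobot as in the class docstring):

```python
import torch

left_goal = torch.zeros(6)                    # hypothetical goal positions for the first follower arm
right_goal = torch.zeros(6)                   # hypothetical goal positions for the second follower arm
action = torch.cat([left_goal, right_goal])   # shape (12,)
sent = robot.send_action(action)              # first 6 values -> first arm, last 6 -> second arm
```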
-
- def print_logs(self):
- pass
- # TODO(aliberts): move robot-specific logs logic here
-
- def disconnect(self):
- if not self.is_connected:
- raise RobotDeviceNotConnectedError(
- "ManipulatorRobot is not connected. You need to run `robot.connect()` before disconnecting."
- )
-
- for name in self.follower_arms:
- self.follower_arms[name].disconnect()
-
- for name in self.leader_arms:
- self.leader_arms[name].disconnect()
-
- for name in self.cameras:
- self.cameras[name].disconnect()
-
- self.is_connected = False
-
- def __del__(self):
- if getattr(self, "is_connected", False):
- self.disconnect()
diff --git a/lerobot/common/robot_devices/robots/stretch.py b/lerobot/common/robot_devices/robots/stretch.py
deleted file mode 100644
index ff86b6d809..0000000000
--- a/lerobot/common/robot_devices/robots/stretch.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import time
-from dataclasses import dataclass, field, replace
-
-import torch
-from stretch_body.gamepad_teleop import GamePadTeleop
-from stretch_body.robot import Robot as StretchAPI
-from stretch_body.robot_params import RobotParams
-
-from lerobot.common.robot_devices.cameras.utils import Camera
-
-
-@dataclass
-class StretchRobotConfig:
- robot_type: str | None = "stretch"
- cameras: dict[str, Camera] = field(default_factory=lambda: {})
- # TODO(aliberts): add feature with max_relative target
- # TODO(aliberts): add comment on max_relative target
- max_relative_target: list[float] | float | None = None
-
-
-class StretchRobot(StretchAPI):
- """Wrapper of stretch_body.robot.Robot"""
-
- def __init__(self, config: StretchRobotConfig | None = None, **kwargs):
- super().__init__()
- if config is None:
- config = StretchRobotConfig()
- # Overwrite config arguments using kwargs
- self.config = replace(config, **kwargs)
-
- self.robot_type = self.config.robot_type
- self.cameras = self.config.cameras
- self.is_connected = False
- self.teleop = None
- self.logs = {}
-
- # TODO(aliberts): test this
- RobotParams.set_logging_level("WARNING")
- RobotParams.set_logging_formatter("brief_console_formatter")
-
- self.state_keys = None
- self.action_keys = None
-
- def connect(self) -> None:
- self.is_connected = self.startup()
- if not self.is_connected:
- print("Another process is already using Stretch. Try running 'stretch_free_robot_process.py'")
- raise ConnectionError()
-
- for name in self.cameras:
- self.cameras[name].connect()
- self.is_connected = self.is_connected and self.cameras[name].is_connected
-
- if not self.is_connected:
- print("Could not connect to the cameras, check that all cameras are plugged-in.")
- raise ConnectionError()
-
- self.run_calibration()
-
- def run_calibration(self) -> None:
- if not self.is_homed():
- self.home()
-
- def teleop_step(
- self, record_data=False
- ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
- # TODO(aliberts): return ndarrays instead of torch.Tensors
- if not self.is_connected:
- raise ConnectionError()
-
- if self.teleop is None:
- self.teleop = GamePadTeleop(robot_instance=False)
- self.teleop.startup(robot=self)
-
- before_read_t = time.perf_counter()
- state = self.get_state()
- action = self.teleop.gamepad_controller.get_state()
- self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
-
- before_write_t = time.perf_counter()
- self.teleop.do_motion(robot=self)
- self.push_command()
- self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t
-
- if self.state_keys is None:
- self.state_keys = list(state)
-
- if not record_data:
- return
-
- state = torch.as_tensor(list(state.values()))
- action = torch.as_tensor(list(action.values()))
-
- # Capture images from cameras
- images = {}
- for name in self.cameras:
- before_camread_t = time.perf_counter()
- images[name] = self.cameras[name].async_read()
- images[name] = torch.from_numpy(images[name])
- self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
- self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t
-
- # Populate output dictionaries
- obs_dict, action_dict = {}, {}
- obs_dict["observation.state"] = state
- action_dict["action"] = action
- for name in self.cameras:
- obs_dict[f"observation.images.{name}"] = images[name]
-
- return obs_dict, action_dict
-
- def get_state(self) -> dict:
- status = self.get_status()
- return {
- "head_pan.pos": status["head"]["head_pan"]["pos"],
- "head_tilt.pos": status["head"]["head_tilt"]["pos"],
- "lift.pos": status["lift"]["pos"],
- "arm.pos": status["arm"]["pos"],
- "wrist_pitch.pos": status["end_of_arm"]["wrist_pitch"]["pos"],
- "wrist_roll.pos": status["end_of_arm"]["wrist_roll"]["pos"],
- "wrist_yaw.pos": status["end_of_arm"]["wrist_yaw"]["pos"],
- "gripper.pos": status["end_of_arm"]["stretch_gripper"]["pos"],
- "base_x.vel": status["base"]["x_vel"],
- "base_y.vel": status["base"]["y_vel"],
- "base_theta.vel": status["base"]["theta_vel"],
- }
-
- def capture_observation(self) -> dict:
- # TODO(aliberts): return ndarrays instead of torch.Tensors
- before_read_t = time.perf_counter()
- state = self.get_state()
- self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
-
- if self.state_keys is None:
- self.state_keys = list(state)
-
- state = torch.as_tensor(list(state.values()))
-
- # Capture images from cameras
- images = {}
- for name in self.cameras:
- before_camread_t = time.perf_counter()
- images[name] = self.cameras[name].async_read()
- images[name] = torch.from_numpy(images[name])
- self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
- self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t
-
- # Populate output dictionaries
- obs_dict = {}
- obs_dict["observation.state"] = state
- for name in self.cameras:
- obs_dict[f"observation.images.{name}"] = images[name]
-
- return obs_dict
-
- def send_action(self, action: torch.Tensor) -> torch.Tensor:
- # TODO(aliberts): return ndarrays instead of torch.Tensors
- if not self.is_connected:
- raise ConnectionError()
-
- if self.teleop is None:
- self.teleop = GamePadTeleop(robot_instance=False)
- self.teleop.startup(robot=self)
-
- if self.action_keys is None:
- dummy_action = self.teleop.gamepad_controller.get_state()
- self.action_keys = list(dummy_action.keys())
-
- action_dict = dict(zip(self.action_keys, action.tolist(), strict=True))
-
- before_write_t = time.perf_counter()
- self.teleop.do_motion(state=action_dict, robot=self)
- self.push_command()
- self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t
-
- # TODO(aliberts): return action_sent when motion is limited
- return action
-
- def print_logs(self) -> None:
- pass
- # TODO(aliberts): move robot-specific logs logic here
-
- def teleop_safety_stop(self) -> None:
- if self.teleop is not None:
- self.teleop._safety_stop(robot=self)
-
- def disconnect(self) -> None:
- self.stop()
- if self.teleop is not None:
- self.teleop.gamepad_controller.stop()
- self.teleop.stop()
-
- if len(self.cameras) > 0:
- for cam in self.cameras.values():
- cam.disconnect()
-
- self.is_connected = False
-
- def __del__(self):
- self.disconnect()
diff --git a/lerobot/common/robot_devices/robots/utils.py b/lerobot/common/robot_devices/robots/utils.py
deleted file mode 100644
index a40db13124..0000000000
--- a/lerobot/common/robot_devices/robots/utils.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from typing import Protocol
-
-
-def get_arm_id(name, arm_type):
- """Returns the string identifier of a robot arm. For instance, for a bimanual manipulator
- like Aloha, it could be left_follower, right_follower, left_leader, or right_leader.
- """
- return f"{name}_{arm_type}"
-
-
-class Robot(Protocol):
- # TODO(rcadene, aliberts): Add unit test checking the protocol is implemented in the corresponding classes
- robot_type: str
- features: dict
-
- def connect(self): ...
- def run_calibration(self): ...
- def teleop_step(self, record_data=False): ...
- def capture_observation(self): ...
- def send_action(self, action): ...
- def disconnect(self): ...
diff --git a/lerobot/common/robot_devices/utils.py b/lerobot/common/robot_devices/utils.py
deleted file mode 100644
index 19bb637e51..0000000000
--- a/lerobot/common/robot_devices/utils.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import platform
-import time
-
-
-def busy_wait(seconds):
- if platform.system() == "Darwin":
- # On Mac, `time.sleep` is not accurate and we need to use this while loop trick,
- # but it consumes CPU cycles.
- # TODO(rcadene): find an alternative: from Python 3.11, time.sleep is precise
- end_time = time.perf_counter() + seconds
- while time.perf_counter() < end_time:
- pass
- else:
- # On Linux time.sleep is accurate
- if seconds > 0:
- time.sleep(seconds)
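A typical use, sketched here, is holding a control loop close to a fixed rate; the 30 fps figure and the loop body are illustrative, and `busy_wait` is assumed to be in scope:

```python
fps = 30
while True:
    start_t = time.perf_counter()
    # ... read the leader arm, write the follower arm, record a frame ...
    dt_s = time.perf_counter() - start_t
    busy_wait(1 / fps - dt_s)
```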
-
-
-def safe_disconnect(func):
- # TODO(aliberts): Allow to pass custom exceptions
- # (e.g. ThreadServiceExit, KeyboardInterrupt, SystemExit, UnpluggedError, DynamixelCommError)
- def wrapper(robot, *args, **kwargs):
- try:
- return func(robot, *args, **kwargs)
- except Exception as e:
- if robot.is_connected:
- robot.disconnect()
- raise e
-
- return wrapper
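For example, any function that takes the robot as its first argument can be wrapped so the robot is disconnected if an exception escapes; the function below is hypothetical:

```python
@safe_disconnect
def record_episode(robot, num_frames: int):
    for _ in range(num_frames):
        robot.teleop_step(record_data=True)
```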
-
-
-class RobotDeviceNotConnectedError(Exception):
- """Exception raised when the robot device is not connected."""
-
- def __init__(
- self, message="This robot device is not connected. Try calling `robot_device.connect()` first."
- ):
- self.message = message
- super().__init__(self.message)
-
-
-class RobotDeviceAlreadyConnectedError(Exception):
- """Exception raised when the robot device is already connected."""
-
- def __init__(
- self,
- message="This robot device is already connected. Try not calling `robot_device.connect()` twice.",
- ):
- self.message = message
- super().__init__(self.message)
diff --git a/lerobot/common/utils/benchmark.py b/lerobot/common/utils/benchmark.py
deleted file mode 100644
index 4b08e6f6d8..0000000000
--- a/lerobot/common/utils/benchmark.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import threading
-import time
-from contextlib import ContextDecorator
-
-
-class TimeBenchmark(ContextDecorator):
- """
- Measures execution time using a context manager or decorator.
-
- This class supports both context manager and decorator usage, and is thread-safe for multithreaded
- environments.
-
- Args:
- print: If True, prints the elapsed time upon exiting the context or completing the function. Defaults
- to False.
-
- Examples:
-
- Using as a context manager:
-
- >>> benchmark = TimeBenchmark()
- >>> with benchmark:
- ... time.sleep(1)
- >>> print(f"Block took {benchmark.result:.4f} seconds")
- Block took approximately 1.0000 seconds
-
- Using with multithreading:
-
- ```python
- import threading
-
- benchmark = TimeBenchmark()
-
- def context_manager_example():
- with benchmark:
- time.sleep(0.01)
- print(f"Block took {benchmark.result_ms:.2f} milliseconds")
-
- threads = []
- for _ in range(3):
- t1 = threading.Thread(target=context_manager_example)
- threads.append(t1)
-
- for t in threads:
- t.start()
-
- for t in threads:
- t.join()
- ```
- Expected output:
- Block took approximately 10.00 milliseconds
- Block took approximately 10.00 milliseconds
- Block took approximately 10.00 milliseconds
- """
-
- def __init__(self, print=False):
- self.local = threading.local()
- self.print_time = print
-
- def __enter__(self):
- self.local.start_time = time.perf_counter()
- return self
-
- def __exit__(self, *exc):
- self.local.end_time = time.perf_counter()
- self.local.elapsed_time = self.local.end_time - self.local.start_time
- if self.print_time:
- print(f"Elapsed time: {self.local.elapsed_time:.4f} seconds")
- return False
-
- @property
- def result(self):
- return getattr(self.local, "elapsed_time", None)
-
- @property
- def result_ms(self):
- return self.result * 1e3
diff --git a/lerobot/common/utils/io_utils.py b/lerobot/common/utils/io_utils.py
deleted file mode 100644
index b85f17c7af..0000000000
--- a/lerobot/common/utils/io_utils.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import warnings
-
-import imageio
-
-
-def write_video(video_path, stacked_frames, fps):
- # Filter out DeprecationWarnings raised from pkg_resources
- with warnings.catch_warnings():
- warnings.filterwarnings(
- "ignore", "pkg_resources is deprecated as an API", category=DeprecationWarning
- )
- imageio.mimsave(video_path, stacked_frames, fps=fps)
diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py
deleted file mode 100644
index 4e276e169b..0000000000
--- a/lerobot/common/utils/utils.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-import os
-import os.path as osp
-import platform
-import random
-from contextlib import contextmanager
-from datetime import datetime, timezone
-from pathlib import Path
-from typing import Any, Generator
-
-import hydra
-import numpy as np
-import torch
-from omegaconf import DictConfig
-
-
-def none_or_int(value):
- if value == "None":
- return None
- return int(value)
-
-
-def inside_slurm():
- """Check whether the python process was launched through slurm"""
- # TODO(rcadene): return False for interactive mode `--pty bash`
- return "SLURM_JOB_ID" in os.environ
-
-
-def get_safe_torch_device(cfg_device: str, log: bool = False) -> torch.device:
- """Given a string, return a torch.device with checks on whether the device is available."""
- match cfg_device:
- case "cuda":
- assert torch.cuda.is_available()
- device = torch.device("cuda")
- case "mps":
- assert torch.backends.mps.is_available()
- device = torch.device("mps")
- case "cpu":
- device = torch.device("cpu")
- if log:
- logging.warning("Using CPU, this will be slow.")
- case _:
- device = torch.device(cfg_device)
- if log:
- logging.warning(f"Using custom {cfg_device} device.")
-
- return device
-
-
-def get_global_random_state() -> dict[str, Any]:
- """Get the random state for `random`, `numpy`, and `torch`."""
- random_state_dict = {
- "random_state": random.getstate(),
- "numpy_random_state": np.random.get_state(),
- "torch_random_state": torch.random.get_rng_state(),
- }
- if torch.cuda.is_available():
- random_state_dict["torch_cuda_random_state"] = torch.cuda.random.get_rng_state()
- return random_state_dict
-
-
-def set_global_random_state(random_state_dict: dict[str, Any]):
- """Set the random state for `random`, `numpy`, and `torch`.
-
- Args:
- random_state_dict: A dictionary of the form returned by `get_global_random_state`.
- """
- random.setstate(random_state_dict["random_state"])
- np.random.set_state(random_state_dict["numpy_random_state"])
- torch.random.set_rng_state(random_state_dict["torch_random_state"])
- if torch.cuda.is_available():
- torch.cuda.random.set_rng_state(random_state_dict["torch_cuda_random_state"])
-
-
-def set_global_seed(seed):
- """Set seed for reproducibility."""
- random.seed(seed)
- np.random.seed(seed)
- torch.manual_seed(seed)
- if torch.cuda.is_available():
- torch.cuda.manual_seed_all(seed)
-
-
-@contextmanager
-def seeded_context(seed: int) -> Generator[None, None, None]:
- """Set the seed when entering a context, and restore the prior random state at exit.
-
- Example usage:
-
- ```
- a = random.random() # produces some random number
- with seeded_context(1337):
- b = random.random() # produces some other random number
- c = random.random() # produces yet another random number, but the same as it would have been if we had never made `b`
- ```
- """
- random_state_dict = get_global_random_state()
- set_global_seed(seed)
- yield None
- set_global_random_state(random_state_dict)
-
-
-def init_logging():
- def custom_format(record):
- dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
- fnameline = f"{record.pathname}:{record.lineno}"
- message = f"{record.levelname} {dt} {fnameline[-15:]:>15} {record.msg}"
- return message
-
- logging.basicConfig(level=logging.INFO)
-
- for handler in logging.root.handlers[:]:
- logging.root.removeHandler(handler)
-
- formatter = logging.Formatter()
- formatter.format = custom_format
- console_handler = logging.StreamHandler()
- console_handler.setFormatter(formatter)
- logging.getLogger().addHandler(console_handler)
-
-
-def format_big_number(num, precision=0):
- suffixes = ["", "K", "M", "B", "T", "Q"]
- divisor = 1000.0
-
- for suffix in suffixes:
- if abs(num) < divisor:
- return f"{num:.{precision}f}{suffix}"
- num /= divisor
-
- return num
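For instance:

```python
format_big_number(1_234_567)               # -> "1M"
format_big_number(1_234_567, precision=2)  # -> "1.23M"
format_big_number(987)                     # -> "987"
```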
-
-
-def _relative_path_between(path1: Path, path2: Path) -> Path:
- """Returns path1 relative to path2."""
- path1 = path1.absolute()
- path2 = path2.absolute()
- try:
- return path1.relative_to(path2)
- except ValueError: # most likely because path1 is not a subpath of path2
- common_parts = Path(osp.commonpath([path1, path2])).parts
- return Path(
- "/".join([".."] * (len(path2.parts) - len(common_parts)) + list(path1.parts[len(common_parts) :]))
- )
-
-
-def init_hydra_config(config_path: str, overrides: list[str] | None = None) -> DictConfig:
- """Initialize a Hydra config given only the path to the relevant config file.
-
- For config resolution, it is assumed that the config file's parent is the Hydra config dir.
- """
- # TODO(alexander-soare): Resolve configs without Hydra initialization.
- hydra.core.global_hydra.GlobalHydra.instance().clear()
- # Hydra needs a path relative to this file.
- hydra.initialize(
- str(_relative_path_between(Path(config_path).absolute().parent, Path(__file__).absolute().parent)),
- version_base="1.2",
- )
- cfg = hydra.compose(Path(config_path).stem, overrides)
- return cfg
-
-
-def print_cuda_memory_usage():
- """Use this function to locate and debug memory leak."""
- import gc
-
- gc.collect()
- # Also clear the cache if you want to fully release the memory
- torch.cuda.empty_cache()
- print("Current GPU Memory Allocated: {:.2f} MB".format(torch.cuda.memory_allocated(0) / 1024**2))
- print("Maximum GPU Memory Allocated: {:.2f} MB".format(torch.cuda.max_memory_allocated(0) / 1024**2))
- print("Current GPU Memory Reserved: {:.2f} MB".format(torch.cuda.memory_reserved(0) / 1024**2))
- print("Maximum GPU Memory Reserved: {:.2f} MB".format(torch.cuda.max_memory_reserved(0) / 1024**2))
-
-
-def capture_timestamp_utc():
- return datetime.now(timezone.utc)
-
-
-def say(text, blocking=False):
- # Check if mac, linux, or windows.
- if platform.system() == "Darwin":
- cmd = f'say "{text}"'
- if not blocking:
- cmd += " &"
- elif platform.system() == "Linux":
- cmd = f'spd-say "{text}"'
- if blocking:
- cmd += " --wait"
- elif platform.system() == "Windows":
- # TODO(rcadene): Make blocking option work for Windows
- cmd = (
- 'PowerShell -Command "Add-Type -AssemblyName System.Speech; '
- f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')\""
- )
-
- os.system(cmd)
-
-
-def log_say(text, play_sounds, blocking=False):
- logging.info(text)
-
- if play_sounds:
- say(text, blocking)
diff --git a/lerobot/configs/default.yaml b/lerobot/configs/default.yaml
deleted file mode 100644
index a3ff1d41b2..0000000000
--- a/lerobot/configs/default.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-defaults:
- - _self_
- - env: pusht
- - policy: diffusion
-
-hydra:
- run:
- # Set `dir` to where you would like to save all of the run outputs. If you run another training session
- # with the same value for `dir`, its contents will be overwritten unless you set `resume` to true.
- dir: outputs/train/${now:%Y-%m-%d}/${now:%H-%M-%S}_${env.name}_${policy.name}_${hydra.job.name}
- job:
- name: default
-
-# Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure
-# `hydra.run.dir` is the directory of an existing run with at least one checkpoint in it.
-# Note that when resuming a run, the default behavior is to use the configuration from the checkpoint,
-# regardless of what's provided with the training command at the time of resumption.
-resume: false
-device: cuda # cpu
-# `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
-# automatic gradient scaling is used.
-use_amp: false
-# `seed` is used for training (eg: model initialization, dataset shuffling)
-# AND for the evaluation environments.
-seed: ???
-# You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data
-# keys common between the datasets are kept. Each dataset gets an additional transform that inserts the
-# "dataset_index" into the returned item. The index mapping is made according to the order in which the
-# datasets are provided.
-dataset_repo_id: lerobot/pusht
-video_backend: pyav
-
-training:
- offline_steps: ???
-
- # Number of workers for the offline training dataloader.
- num_workers: 4
-
- batch_size: ???
-
- eval_freq: ???
- log_freq: 200
- save_checkpoint: true
- # Checkpoint is saved every `save_freq` training iterations and after the last training step.
- save_freq: ???
-
- # Online training. Note that the online training loop adopts most of the options above, apart from the
- # dataloader options, unless otherwise specified.
- # The online training loop looks something like:
- #
- # for i in range(online_steps):
- # do_online_rollout_and_update_online_buffer()
- # for j in range(online_steps_between_rollouts):
- # batch = next(dataloader_with_offline_and_online_data)
- # loss = policy(batch)
- # loss.backward()
- # optimizer.step()
- #
- online_steps: ???
- # How many episodes to collect at once when we reach the online rollout part of the training loop.
- online_rollout_n_episodes: 1
- # The number of environments to use in the gym.vector.VectorEnv. This ends up also being the batch size for
- # the policy. Ideally you should set this to be an even divisor of online_rollout_n_episodes.
- online_rollout_batch_size: 1
- # How many optimization steps (forward, backward, optimizer step) to do between running rollouts.
- online_steps_between_rollouts: null
- # The proportion of online samples (vs offline samples) to include in the online training batches.
- online_sampling_ratio: 0.5
- # First seed to use for the online rollout environment. Seeds for subsequent rollouts are incremented by 1.
- online_env_seed: null
- # Sets the maximum number of frames that are stored in the online buffer for online training. The buffer is
- # FIFO.
- online_buffer_capacity: null
- # The minimum number of frames to have in the online buffer before commencing online training.
- # If online_buffer_seed_size > online_rollout_n_episodes, the rollout will be run multiple times until the
- # seed size condition is satisfied.
- online_buffer_seed_size: 0
- # Whether to run the online rollouts asynchronously. This means we can run the online training steps in
- # parallel with the rollouts. This might be advised if your GPU has the bandwidth to handle training
- # + eval + environment rendering simultaneously.
- do_online_rollout_async: false
-
- image_transforms:
- # These transforms are all using standard torchvision.transforms.v2
- # You can find out how these transformations affect images here:
- # https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html
- # We use a custom RandomSubsetApply container to sample them.
- # For each transform, the following parameters are available:
- # weight: This represents the multinomial probability (with no replacement)
- # used for sampling the transform. If the sum of the weights is not 1,
- # they will be normalized.
- # min_max: Lower & upper bound respectively used for sampling the transform's parameter
- # (following uniform distribution) when it's applied.
- # Set this flag to `true` to enable transforms during training
- enable: false
- # This is the maximum number of transforms (sampled from these below) that will be applied to each frame.
- # It's an integer in the interval [1, number of available transforms].
- max_num_transforms: 3
- # By default, transforms are applied in Torchvision's suggested order (shown below).
- # Set this to True to apply them in a random order.
- random_order: false
- brightness:
- weight: 1
- min_max: [0.8, 1.2]
- contrast:
- weight: 1
- min_max: [0.8, 1.2]
- saturation:
- weight: 1
- min_max: [0.5, 1.5]
- hue:
- weight: 1
- min_max: [-0.05, 0.05]
- sharpness:
- weight: 1
- min_max: [0.8, 1.2]
-
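
As a rough illustration of the sampling scheme described in the comments above (weights drawn without replacement, parameters drawn uniformly from `min_max`), here is a minimal sketch built on torchvision's v2 transforms. It is not LeRobot's actual RandomSubsetApply container, and the sharpness transform is omitted for brevity.

```python
import torch
from torchvision.transforms import v2

# name -> (weight, transform built from the min_max range above)
CANDIDATES = {
    "brightness": (1.0, v2.ColorJitter(brightness=(0.8, 1.2))),
    "contrast": (1.0, v2.ColorJitter(contrast=(0.8, 1.2))),
    "saturation": (1.0, v2.ColorJitter(saturation=(0.5, 1.5))),
    "hue": (1.0, v2.ColorJitter(hue=(-0.05, 0.05))),
}


def apply_random_subset(img, candidates=CANDIDATES, max_num_transforms=3, random_order=False):
    names = list(candidates)
    weights = torch.tensor([candidates[n][0] for n in names])
    # Multinomial sampling without replacement; weights are normalized implicitly.
    k = min(max_num_transforms, len(names))
    picked = torch.multinomial(weights, num_samples=k, replacement=False)
    if not random_order:
        # Keep the declaration order above unless a random order is requested.
        picked = picked.sort().values
    for idx in picked:
        img = candidates[names[idx]][1](img)
    return img
```
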
-eval:
- n_episodes: 1
- # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv.
- batch_size: 1
- # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing).
- use_async_envs: false
-
-wandb:
- enable: false
- # Set to true to disable saving an artifact despite save_checkpoint == True
- disable_artifact: false
- project: lerobot
- notes: ""
diff --git a/lerobot/configs/env/aloha.yaml b/lerobot/configs/env/aloha.yaml
deleted file mode 100644
index 296a4481c4..0000000000
--- a/lerobot/configs/env/aloha.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-# @package _global_
-
-fps: 50
-
-env:
- name: aloha
- task: AlohaInsertion-v0
- state_dim: 14
- action_dim: 14
- fps: ${fps}
- episode_length: 400
- gym:
- obs_type: pixels_agent_pos
- render_mode: rgb_array
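
As a hedged illustration of how the `gym:` options above are consumed: the environment factory builds a Gymnasium env from `env.name`/`env.task` and forwards the `gym:` sub-keys as keyword arguments. The environment id below assumes the gym-aloha package is installed and registers the task under that name.

```python
import gymnasium as gym
import gym_aloha  # noqa: F401  # registers the Aloha tasks (assumed)

env = gym.make(
    "gym_aloha/AlohaInsertion-v0",
    obs_type="pixels_agent_pos",
    render_mode="rgb_array",
)
obs, info = env.reset(seed=0)
```
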
diff --git a/lerobot/configs/env/aloha_real.yaml b/lerobot/configs/env/aloha_real.yaml
deleted file mode 100644
index 57af4be20b..0000000000
--- a/lerobot/configs/env/aloha_real.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# @package _global_
-
-fps: 30
-
-env:
- name: real_world
- task: null
- state_dim: 18
- action_dim: 18
- fps: ${fps}
diff --git a/lerobot/configs/env/dora_aloha_real.yaml b/lerobot/configs/env/dora_aloha_real.yaml
deleted file mode 100644
index 088781d4ed..0000000000
--- a/lerobot/configs/env/dora_aloha_real.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# @package _global_
-
-fps: 30
-
-env:
- name: dora
- task: DoraAloha-v0
- state_dim: 14
- action_dim: 14
- fps: ${fps}
- episode_length: 400
- gym:
- fps: ${fps}
diff --git a/lerobot/configs/env/koch_real.yaml b/lerobot/configs/env/koch_real.yaml
deleted file mode 100644
index 8e65d72f4e..0000000000
--- a/lerobot/configs/env/koch_real.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# @package _global_
-
-fps: 30
-
-env:
- name: real_world
- task: null
- state_dim: 6
- action_dim: 6
- fps: ${fps}
diff --git a/lerobot/configs/env/moss_real.yaml b/lerobot/configs/env/moss_real.yaml
deleted file mode 100644
index 8e65d72f4e..0000000000
--- a/lerobot/configs/env/moss_real.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# @package _global_
-
-fps: 30
-
-env:
- name: real_world
- task: null
- state_dim: 6
- action_dim: 6
- fps: ${fps}
diff --git a/lerobot/configs/env/pusht.yaml b/lerobot/configs/env/pusht.yaml
deleted file mode 100644
index 771fbbf4d4..0000000000
--- a/lerobot/configs/env/pusht.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# @package _global_
-
-fps: 10
-
-env:
- name: pusht
- task: PushT-v0
- image_size: 96
- state_dim: 2
- action_dim: 2
- fps: ${fps}
- episode_length: 300
- gym:
- obs_type: pixels_agent_pos
- render_mode: rgb_array
- visualization_width: 384
- visualization_height: 384
diff --git a/lerobot/configs/env/so100_real.yaml b/lerobot/configs/env/so100_real.yaml
deleted file mode 100644
index 8e65d72f4e..0000000000
--- a/lerobot/configs/env/so100_real.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# @package _global_
-
-fps: 30
-
-env:
- name: real_world
- task: null
- state_dim: 6
- action_dim: 6
- fps: ${fps}
diff --git a/lerobot/configs/env/xarm.yaml b/lerobot/configs/env/xarm.yaml
deleted file mode 100644
index 4320379aee..0000000000
--- a/lerobot/configs/env/xarm.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# @package _global_
-
-fps: 15
-
-env:
- name: xarm
- task: XarmLift-v0
- image_size: 84
- state_dim: 4
- action_dim: 4
- fps: ${fps}
- episode_length: 200
- gym:
- obs_type: pixels_agent_pos
- render_mode: rgb_array
- visualization_width: 384
- visualization_height: 384
diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml
deleted file mode 100644
index 28883936a6..0000000000
--- a/lerobot/configs/policy/act.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-# @package _global_
-
-seed: 1000
-dataset_repo_id: lerobot/aloha_sim_insertion_human
-
-override_dataset_stats:
- observation.images.top:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-training:
- offline_steps: 100000
- online_steps: 0
- eval_freq: 20000
- save_freq: 20000
- save_checkpoint: true
-
- batch_size: 8
- lr: 1e-5
- lr_backbone: 1e-5
- weight_decay: 1e-4
- grad_clip_norm: 10
- online_steps_between_rollouts: 1
-
- delta_timestamps:
- action: "[i / ${fps} for i in range(${policy.chunk_size})]"
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-# See `configuration_act.py` for more details.
-policy:
- name: act
-
- # Input / output structure.
- n_obs_steps: 1
- chunk_size: 100 # chunk_size
- n_action_steps: 100
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.images.top: [3, 480, 640]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.images.top: mean_std
- observation.state: mean_std
- output_normalization_modes:
- action: mean_std
-
- # Architecture.
- # Vision backbone.
- vision_backbone: resnet18
- pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
- replace_final_stride_with_dilation: false
- # Transformer layers.
- pre_norm: false
- dim_model: 512
- n_heads: 8
- dim_feedforward: 3200
- feedforward_activation: relu
- n_encoder_layers: 4
- # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
- # that means only the first layer is used. Here we match the original implementation by setting this to 1.
- # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
- n_decoder_layers: 1
- # VAE.
- use_vae: true
- latent_dim: 32
- n_vae_encoder_layers: 4
-
- # Inference.
- temporal_ensemble_coeff: null
-
- # Training and loss computation.
- dropout: 0.1
- kl_weight: 10.0
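
For reference, a quick check of what the `delta_timestamps` expression above resolves to once Hydra interpolates `${fps}` (50 for the Aloha sim config) and `${policy.chunk_size}` (100): the action key is loaded at 100 evenly spaced relative times.

```python
fps, chunk_size = 50, 100
action_ts = [i / fps for i in range(chunk_size)]
print(action_ts[:3], action_ts[-1])  # [0.0, 0.02, 0.04] 1.98
```
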
diff --git a/lerobot/configs/policy/act_aloha_real.yaml b/lerobot/configs/policy/act_aloha_real.yaml
deleted file mode 100644
index 7c8094da19..0000000000
--- a/lerobot/configs/policy/act_aloha_real.yaml
+++ /dev/null
@@ -1,121 +0,0 @@
-# @package _global_
-
-# Use `act_aloha_real.yaml` to train on real-world datasets collected on Aloha or Aloha-2 robots.
-# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, cam_high, cam_low) instead of 1 camera (i.e. top).
-# Also, `training.eval_freq` is set to -1. This parameter normally triggers checkpoint evaluation every `eval_freq` training steps;
-# setting it to -1 deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script.
-# Look at the documentation in the header of `control_robot.py` for more information on how to collect data, train and evaluate a policy.
-#
-# Example of usage for training and inference with `control_robot.py`:
-# ```bash
-# python lerobot/scripts/train.py \
-# policy=act_aloha_real \
-# env=aloha_real
-# ```
-#
-# Example of usage for training and inference with [Dora-rs](https://github.com/dora-rs/dora-lerobot):
-# ```bash
-# python lerobot/scripts/train.py \
-# policy=act_aloha_real \
-# env=dora_aloha_real
-# ```
-
-seed: 1000
-dataset_repo_id: lerobot/aloha_static_vinh_cup
-
-override_dataset_stats:
- observation.images.cam_right_wrist:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
- observation.images.cam_left_wrist:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
- observation.images.cam_high:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
- observation.images.cam_low:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-training:
- offline_steps: 80000
- online_steps: 0
- eval_freq: -1
- save_freq: 10000
- log_freq: 100
- save_checkpoint: true
-
- batch_size: 8
- lr: 1e-5
- lr_backbone: 1e-5
- weight_decay: 1e-4
- grad_clip_norm: 10
- online_steps_between_rollouts: 1
-
- delta_timestamps:
- action: "[i / ${fps} for i in range(${policy.chunk_size})]"
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-# See `configuration_act.py` for more details.
-policy:
- name: act
-
- # Input / output structure.
- n_obs_steps: 1
- chunk_size: 100
- n_action_steps: 100
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.images.cam_right_wrist: [3, 480, 640]
- observation.images.cam_left_wrist: [3, 480, 640]
- observation.images.cam_high: [3, 480, 640]
- observation.images.cam_low: [3, 480, 640]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.images.cam_right_wrist: mean_std
- observation.images.cam_left_wrist: mean_std
- observation.images.cam_high: mean_std
- observation.images.cam_low: mean_std
- observation.state: mean_std
- output_normalization_modes:
- action: mean_std
-
- # Architecture.
- # Vision backbone.
- vision_backbone: resnet18
- pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
- replace_final_stride_with_dilation: false
- # Transformer layers.
- pre_norm: false
- dim_model: 512
- n_heads: 8
- dim_feedforward: 3200
- feedforward_activation: relu
- n_encoder_layers: 4
- # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
- # that means only the first layer is used. Here we match the original implementation by setting this to 1.
- # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
- n_decoder_layers: 1
- # VAE.
- use_vae: true
- latent_dim: 32
- n_vae_encoder_layers: 4
-
- # Inference.
- temporal_ensemble_coeff: null
-
- # Training and loss computation.
- dropout: 0.1
- kl_weight: 10.0
diff --git a/lerobot/configs/policy/act_koch_real.yaml b/lerobot/configs/policy/act_koch_real.yaml
deleted file mode 100644
index 6ddebab14d..0000000000
--- a/lerobot/configs/policy/act_koch_real.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# @package _global_
-
-# Use `act_koch_real.yaml` to train on real-world datasets collected on Alexander Koch's robots.
-# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top).
-# Also, `training.eval_freq` is set to -1. This parameter normally triggers checkpoint evaluation every `eval_freq` training steps;
-# setting it to -1 deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script.
-# Look at the documentation in the header of `control_robot.py` for more information on how to collect data, train and evaluate a policy.
-#
-# Example of usage for training:
-# ```bash
-# python lerobot/scripts/train.py \
-# policy=act_koch_real \
-# env=koch_real
-# ```
-
-seed: 1000
-dataset_repo_id: lerobot/koch_pick_place_lego
-
-override_dataset_stats:
- observation.images.laptop:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
- observation.images.phone:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-training:
- offline_steps: 80000
- online_steps: 0
- eval_freq: -1
- save_freq: 10000
- log_freq: 100
- save_checkpoint: true
-
- batch_size: 8
- lr: 1e-5
- lr_backbone: 1e-5
- weight_decay: 1e-4
- grad_clip_norm: 10
- online_steps_between_rollouts: 1
-
- delta_timestamps:
- action: "[i / ${fps} for i in range(${policy.chunk_size})]"
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-# See `configuration_act.py` for more details.
-policy:
- name: act
-
- # Input / output structure.
- n_obs_steps: 1
- chunk_size: 100
- n_action_steps: 100
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.images.laptop: [3, 480, 640]
- observation.images.phone: [3, 480, 640]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.images.laptop: mean_std
- observation.images.phone: mean_std
- observation.state: mean_std
- output_normalization_modes:
- action: mean_std
-
- # Architecture.
- # Vision backbone.
- vision_backbone: resnet18
- pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
- replace_final_stride_with_dilation: false
- # Transformer layers.
- pre_norm: false
- dim_model: 512
- n_heads: 8
- dim_feedforward: 3200
- feedforward_activation: relu
- n_encoder_layers: 4
- # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
- # that means only the first layer is used. Here we match the original implementation by setting this to 1.
- # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
- n_decoder_layers: 1
- # VAE.
- use_vae: true
- latent_dim: 32
- n_vae_encoder_layers: 4
-
- # Inference.
- temporal_ensemble_coeff: null
-
- # Training and loss computation.
- dropout: 0.1
- kl_weight: 10.0
diff --git a/lerobot/configs/policy/act_moss_real.yaml b/lerobot/configs/policy/act_moss_real.yaml
deleted file mode 100644
index d996c3597d..0000000000
--- a/lerobot/configs/policy/act_moss_real.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# @package _global_
-
-# Use `act_moss_real.yaml` to train on real-world datasets collected on Moss v1 robot arms.
-# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top).
-# Also, `training.eval_freq` is set to -1. This parameter normally triggers checkpoint evaluation every `eval_freq` training steps;
-# setting it to -1 deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script.
-# Look at the documentation in the header of `control_robot.py` for more information on how to collect data, train and evaluate a policy.
-#
-# Example of usage for training:
-# ```bash
-# python lerobot/scripts/train.py \
-#   policy=act_moss_real \
-#   env=moss_real
-# ```
-
-seed: 1000
-dataset_repo_id: lerobot/moss_pick_place_lego
-
-override_dataset_stats:
- observation.images.laptop:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
- observation.images.phone:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-training:
- offline_steps: 80000
- online_steps: 0
- eval_freq: -1
- save_freq: 10000
- log_freq: 100
- save_checkpoint: true
-
- batch_size: 8
- lr: 1e-5
- lr_backbone: 1e-5
- weight_decay: 1e-4
- grad_clip_norm: 10
- online_steps_between_rollouts: 1
-
- delta_timestamps:
- action: "[i / ${fps} for i in range(${policy.chunk_size})]"
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-# See `configuration_act.py` for more details.
-policy:
- name: act
-
- # Input / output structure.
- n_obs_steps: 1
- chunk_size: 100
- n_action_steps: 100
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.images.laptop: [3, 480, 640]
- observation.images.phone: [3, 480, 640]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.images.laptop: mean_std
- observation.images.phone: mean_std
- observation.state: mean_std
- output_normalization_modes:
- action: mean_std
-
- # Architecture.
- # Vision backbone.
- vision_backbone: resnet18
- pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
- replace_final_stride_with_dilation: false
- # Transformer layers.
- pre_norm: false
- dim_model: 512
- n_heads: 8
- dim_feedforward: 3200
- feedforward_activation: relu
- n_encoder_layers: 4
- # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
- # that means only the first layer is used. Here we match the original implementation by setting this to 1.
- # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
- n_decoder_layers: 1
- # VAE.
- use_vae: true
- latent_dim: 32
- n_vae_encoder_layers: 4
-
- # Inference.
- temporal_ensemble_coeff: null
-
- # Training and loss computation.
- dropout: 0.1
- kl_weight: 10.0
diff --git a/lerobot/configs/policy/act_so100_real.yaml b/lerobot/configs/policy/act_so100_real.yaml
deleted file mode 100644
index cf5b1f1470..0000000000
--- a/lerobot/configs/policy/act_so100_real.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# @package _global_
-
-# Use `act_so100_real.yaml` to train on real-world datasets collected on SO-100 robot arms.
-# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top).
-# Also, `training.eval_freq` is set to -1. This parameter normally triggers checkpoint evaluation every `eval_freq` training steps;
-# setting it to -1 deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script.
-# Look at the documentation in the header of `control_robot.py` for more information on how to collect data, train and evaluate a policy.
-#
-# Example of usage for training:
-# ```bash
-# python lerobot/scripts/train.py \
-#   policy=act_so100_real \
-#   env=so100_real
-# ```
-
-seed: 1000
-dataset_repo_id: lerobot/so100_pick_place_lego
-
-override_dataset_stats:
- observation.images.laptop:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
- observation.images.phone:
- # stats from imagenet, since we use a pretrained vision model
- mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
- std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-training:
- offline_steps: 80000
- online_steps: 0
- eval_freq: -1
- save_freq: 10000
- log_freq: 100
- save_checkpoint: true
-
- batch_size: 8
- lr: 1e-5
- lr_backbone: 1e-5
- weight_decay: 1e-4
- grad_clip_norm: 10
- online_steps_between_rollouts: 1
-
- delta_timestamps:
- action: "[i / ${fps} for i in range(${policy.chunk_size})]"
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-# See `configuration_act.py` for more details.
-policy:
- name: act
-
- # Input / output structure.
- n_obs_steps: 1
- chunk_size: 100
- n_action_steps: 100
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.images.laptop: [3, 480, 640]
- observation.images.phone: [3, 480, 640]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.images.laptop: mean_std
- observation.images.phone: mean_std
- observation.state: mean_std
- output_normalization_modes:
- action: mean_std
-
- # Architecture.
- # Vision backbone.
- vision_backbone: resnet18
- pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
- replace_final_stride_with_dilation: false
- # Transformer layers.
- pre_norm: false
- dim_model: 512
- n_heads: 8
- dim_feedforward: 3200
- feedforward_activation: relu
- n_encoder_layers: 4
- # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
- # that means only the first layer is used. Here we match the original implementation by setting this to 1.
- # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
- n_decoder_layers: 1
- # VAE.
- use_vae: true
- latent_dim: 32
- n_vae_encoder_layers: 4
-
- # Inference.
- temporal_ensemble_coeff: null
-
- # Training and loss computation.
- dropout: 0.1
- kl_weight: 10.0
diff --git a/lerobot/configs/policy/diffusion.yaml b/lerobot/configs/policy/diffusion.yaml
deleted file mode 100644
index 880819bb94..0000000000
--- a/lerobot/configs/policy/diffusion.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# @package _global_
-
-# Defaults for training for the PushT dataset as per https://github.com/real-stanford/diffusion_policy.
-# Note: We do not track EMA model weights as we discovered it does not improve the results. See
-# https://github.com/huggingface/lerobot/pull/134 for more details.
-
-seed: 100000
-dataset_repo_id: lerobot/pusht
-
-override_dataset_stats:
- # TODO(rcadene, alexander-soare): should we remove image stats as well? do we use a pretrained vision model?
- observation.image:
- mean: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1)
- std: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1)
- # TODO(rcadene, alexander-soare): we override state and action stats to use the same as the pretrained model
- # from the original codebase, but we should remove these and train our own pretrained model
- observation.state:
- min: [13.456424, 32.938293]
- max: [496.14618, 510.9579]
- action:
- min: [12.0, 25.0]
- max: [511.0, 511.0]
-
-training:
- offline_steps: 200000
- online_steps: 0
- eval_freq: 25000
- save_freq: 25000
- save_checkpoint: true
-
- batch_size: 64
- grad_clip_norm: 10
- lr: 1.0e-4
- lr_scheduler: cosine
- lr_warmup_steps: 500
- adam_betas: [0.95, 0.999]
- adam_eps: 1.0e-8
- adam_weight_decay: 1.0e-6
- online_steps_between_rollouts: 1
-
- delta_timestamps:
- observation.image: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]"
- observation.state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]"
- action: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1 - ${policy.n_obs_steps} + ${policy.horizon})]"
-
- # The original implementation doesn't sample frames for the last 7 steps,
- # which avoids excessive padding and leads to improved training results.
- drop_n_last_frames: 7 # ${policy.horizon} - ${policy.n_action_steps} - ${policy.n_obs_steps} + 1
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-policy:
- name: diffusion
-
- # Input / output structure.
- n_obs_steps: 2
- horizon: 16
- n_action_steps: 8
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.image: [3, 96, 96]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.image: mean_std
- observation.state: min_max
- output_normalization_modes:
- action: min_max
-
- # Architecture / modeling.
- # Vision backbone.
- vision_backbone: resnet18
- crop_shape: [84, 84]
- crop_is_random: True
- pretrained_backbone_weights: null
- use_group_norm: True
- spatial_softmax_num_keypoints: 32
- # Unet.
- down_dims: [512, 1024, 2048]
- kernel_size: 5
- n_groups: 8
- diffusion_step_embed_dim: 128
- use_film_scale_modulation: True
- # Noise scheduler.
- noise_scheduler_type: DDPM
- num_train_timesteps: 100
- beta_schedule: squaredcos_cap_v2
- beta_start: 0.0001
- beta_end: 0.02
- prediction_type: epsilon # epsilon / sample
- clip_sample: True
- clip_sample_range: 1.0
-
- # Inference
- num_inference_steps: null # if not provided, defaults to `num_train_timesteps`
-
- # Loss computation
- do_mask_loss_for_padding: false
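
A small arithmetic check of the timing settings above, assuming the PushT env's fps of 10: observations cover the current and previous frame, actions cover the full 16-step horizon starting at the earliest observation, and `drop_n_last_frames` follows the formula quoted in the comment.

```python
fps, n_obs_steps, horizon, n_action_steps = 10, 2, 16, 8

obs_ts = [i / fps for i in range(1 - n_obs_steps, 1)]                             # [-0.1, 0.0]
action_ts = [i / fps for i in range(1 - n_obs_steps, 1 - n_obs_steps + horizon)]  # [-0.1, 0.0, ..., 1.4]
drop_n_last_frames = horizon - n_action_steps - n_obs_steps + 1                   # 7
```
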
diff --git a/lerobot/configs/policy/diffusion_pusht_keypoints.yaml b/lerobot/configs/policy/diffusion_pusht_keypoints.yaml
deleted file mode 100644
index a5fe6cf936..0000000000
--- a/lerobot/configs/policy/diffusion_pusht_keypoints.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-# @package _global_
-
-# Defaults for training for the pusht_keypoints dataset.
-
-# The keypoints are on the vertices of the rectangles that make up the PushT object, as documented in the PushT
-# environment:
-# https://github.com/huggingface/gym-pusht/blob/5e2489be9ff99ed9cd47b6c653dda3b7aa844d24/gym_pusht/envs/pusht.py#L522-L534
-# For completeness, the diagram is copied here:
-# 0───────────1
-# │ │
-# 3───4───5───2
-# │ │
-# │ │
-# │ │
-# │ │
-# 7───6
-
-
-# Note: The original work trains keypoints-only with conditioning via inpainting. Here, we encode the
-# observation along with the agent position and use the encoding as global conditioning for the denoising
-# U-Net.
-
-# Note: We do not track EMA model weights as we discovered it does not improve the results. See
-# https://github.com/huggingface/lerobot/pull/134 for more details.
-
-seed: 100000
-dataset_repo_id: lerobot/pusht_keypoints
-
-training:
- offline_steps: 200000
- online_steps: 0
- eval_freq: 5000
- save_freq: 5000
- log_freq: 250
- save_checkpoint: true
-
- batch_size: 64
- grad_clip_norm: 10
- lr: 1.0e-4
- lr_scheduler: cosine
- lr_warmup_steps: 500
- adam_betas: [0.95, 0.999]
- adam_eps: 1.0e-8
- adam_weight_decay: 1.0e-6
- online_steps_between_rollouts: 1
-
- delta_timestamps:
- observation.environment_state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]"
- observation.state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]"
- action: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1 - ${policy.n_obs_steps} + ${policy.horizon})]"
-
- # The original implementation doesn't sample frames for the last 7 steps,
- # which avoids excessive padding and leads to improved training results.
- drop_n_last_frames: 7 # ${policy.horizon} - ${policy.n_action_steps} - ${policy.n_obs_steps} + 1
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-policy:
- name: diffusion
-
- # Input / output structure.
- n_obs_steps: 2
- horizon: 16
- n_action_steps: 8
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.environment_state: [16]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.environment_state: min_max
- observation.state: min_max
- output_normalization_modes:
- action: min_max
-
- # Architecture / modeling.
- # Vision backbone.
- vision_backbone: resnet18
- crop_shape: [84, 84]
- crop_is_random: True
- pretrained_backbone_weights: null
- use_group_norm: True
- spatial_softmax_num_keypoints: 32
- # Unet.
- down_dims: [256, 512, 1024]
- kernel_size: 5
- n_groups: 8
- diffusion_step_embed_dim: 128
- use_film_scale_modulation: True
- # Noise scheduler.
- noise_scheduler_type: DDIM
- num_train_timesteps: 100
- beta_schedule: squaredcos_cap_v2
- beta_start: 0.0001
- beta_end: 0.02
- prediction_type: epsilon # epsilon / sample
- clip_sample: True
- clip_sample_range: 1.0
-
- # Inference
- num_inference_steps: 10 # if not provided, defaults to `num_train_timesteps`
-
- # Loss computation
- do_mask_loss_for_padding: false
diff --git a/lerobot/configs/policy/tdmpc.yaml b/lerobot/configs/policy/tdmpc.yaml
deleted file mode 100644
index 7c85fcf807..0000000000
--- a/lerobot/configs/policy/tdmpc.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-# @package _global_
-
-seed: 1
-dataset_repo_id: lerobot/xarm_lift_medium
-
-training:
- offline_steps: 50000
-
- num_workers: 4
-
- batch_size: 256
- grad_clip_norm: 10.0
- lr: 3e-4
-
- save_freq: 10000
- eval_freq: 5000
- log_freq: 100
-
- online_steps: 50000
- online_rollout_n_episodes: 1
- online_rollout_batch_size: 1
- # Note: in FOWM `online_steps_between_rollouts` is actually dynamically set to match exactly the length of
- # the last sampled episode.
- online_steps_between_rollouts: 50
- online_sampling_ratio: 0.5
- online_env_seed: 10000
- # FOWM Push uses 10000 for `online_buffer_capacity`. Given that their maximum episode length for this task
- # is 25, 10000 is approximately 400 episodes' worth. Since our episodes are about 8 times longer, we'll use
- # 80000.
- online_buffer_capacity: 80000
-
- delta_timestamps:
- observation.image: "[i / ${fps} for i in range(${policy.horizon} + 1)]"
- observation.state: "[i / ${fps} for i in range(${policy.horizon} + 1)]"
- action: "[i / ${fps} for i in range(${policy.horizon})]"
- next.reward: "[i / ${fps} for i in range(${policy.horizon})]"
-
-policy:
- name: tdmpc
-
- pretrained_model_path:
-
- # Input / output structure.
- n_action_repeats: 2
- horizon: 5
- n_action_steps: 1
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.image: [3, 84, 84]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes: null
- output_normalization_modes:
- action: min_max
-
- # Architecture / modeling.
- # Neural networks.
- image_encoder_hidden_dim: 32
- state_encoder_hidden_dim: 256
- latent_dim: 50
- q_ensemble_size: 5
- mlp_dim: 512
- # Reinforcement learning.
- discount: 0.9
-
- # Inference.
- use_mpc: true
- cem_iterations: 6
- max_std: 2.0
- min_std: 0.05
- n_gaussian_samples: 512
- n_pi_samples: 51
- uncertainty_regularizer_coeff: 1.0
- n_elites: 50
- elite_weighting_temperature: 0.5
- gaussian_mean_momentum: 0.1
-
- # Training and loss computation.
- max_random_shift_ratio: 0.0476
- # Loss coefficients.
- reward_coeff: 0.5
- expectile_weight: 0.9
- value_coeff: 0.1
- consistency_coeff: 20.0
- advantage_scaling: 3.0
- pi_coeff: 0.5
- temporal_decay_coeff: 0.5
- # Target model.
- target_model_momentum: 0.995
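
As a sanity check on the `delta_timestamps` above, with the xarm env's fps of 15 and `policy.horizon` of 5: observations span horizon + 1 frames, while actions and rewards span horizon frames.

```python
fps, horizon = 15, 5
obs_ts = [i / fps for i in range(horizon + 1)]  # 6 timestamps: 0.0 ... 5/15
act_ts = [i / fps for i in range(horizon)]      # 5 timestamps: 0.0 ... 4/15
```
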
diff --git a/lerobot/configs/policy/tdmpc_pusht_keypoints.yaml b/lerobot/configs/policy/tdmpc_pusht_keypoints.yaml
deleted file mode 100644
index 1cfc5b5276..0000000000
--- a/lerobot/configs/policy/tdmpc_pusht_keypoints.yaml
+++ /dev/null
@@ -1,105 +0,0 @@
-# @package _global_
-
-# Train with:
-#
-# python lerobot/scripts/train.py \
-# env=pusht \
-# env.gym.obs_type=environment_state_agent_pos \
-# policy=tdmpc_pusht_keypoints \
-# eval.batch_size=50 \
-# eval.n_episodes=50 \
-# eval.use_async_envs=true \
-# device=cuda \
-# use_amp=true
-
-seed: 1
-dataset_repo_id: lerobot/pusht_keypoints
-
-training:
- offline_steps: 0
-
- # Offline training dataloader
- num_workers: 4
-
- batch_size: 256
- grad_clip_norm: 10.0
- lr: 3e-4
-
- eval_freq: 10000
- log_freq: 500
- save_freq: 50000
-
- online_steps: 1000000
- online_rollout_n_episodes: 10
- online_rollout_batch_size: 10
- online_steps_between_rollouts: 1000
- online_sampling_ratio: 1.0
- online_env_seed: 10000
- online_buffer_capacity: 40000
- online_buffer_seed_size: 0
- do_online_rollout_async: false
-
- delta_timestamps:
- observation.environment_state: "[i / ${fps} for i in range(${policy.horizon} + 1)]"
- observation.state: "[i / ${fps} for i in range(${policy.horizon} + 1)]"
- action: "[i / ${fps} for i in range(${policy.horizon})]"
- next.reward: "[i / ${fps} for i in range(${policy.horizon})]"
-
-policy:
- name: tdmpc
-
- pretrained_model_path:
-
- # Input / output structure.
- n_action_repeats: 1
- horizon: 5
- n_action_steps: 5
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.environment_state: [16]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.environment_state: min_max
- observation.state: min_max
- output_normalization_modes:
- action: min_max
-
- # Architecture / modeling.
- # Neural networks.
- image_encoder_hidden_dim: 32
- state_encoder_hidden_dim: 256
- latent_dim: 50
- q_ensemble_size: 5
- mlp_dim: 512
- # Reinforcement learning.
- discount: 0.98
-
- # Inference.
- use_mpc: true
- cem_iterations: 6
- max_std: 2.0
- min_std: 0.05
- n_gaussian_samples: 512
- n_pi_samples: 51
- uncertainty_regularizer_coeff: 1.0
- n_elites: 50
- elite_weighting_temperature: 0.5
- gaussian_mean_momentum: 0.1
-
- # Training and loss computation.
- max_random_shift_ratio: 0.0476
- # Loss coefficients.
- reward_coeff: 0.5
- expectile_weight: 0.9
- value_coeff: 0.1
- consistency_coeff: 20.0
- advantage_scaling: 3.0
- pi_coeff: 0.5
- temporal_decay_coeff: 0.5
- # Target model.
- target_model_momentum: 0.995
diff --git a/lerobot/configs/policy/vqbet.yaml b/lerobot/configs/policy/vqbet.yaml
deleted file mode 100644
index cc70d61ad0..0000000000
--- a/lerobot/configs/policy/vqbet.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
-# @package _global_
-
-# Defaults for training for the PushT dataset.
-
-seed: 100000
-dataset_repo_id: lerobot/pusht
-
-override_dataset_stats:
- # TODO(rcadene, alexander-soare): should we remove image stats as well? do we use a pretrained vision model?
- observation.image:
- mean: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1)
- std: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1)
- # TODO(rcadene, alexander-soare): we override state and action stats to use the same as the pretrained model
- # from the original codebase, but we should remove these and train our own pretrained model
- observation.state:
- min: [13.456424, 32.938293]
- max: [496.14618, 510.9579]
- action:
- min: [12.0, 25.0]
- max: [511.0, 511.0]
-
-training:
- offline_steps: 250000
- online_steps: 0
- eval_freq: 25000
- save_freq: 25000
- save_checkpoint: true
-
- batch_size: 64
- grad_clip_norm: 10
- lr: 1.0e-4
- lr_scheduler: cosine
- lr_warmup_steps: 500
- adam_betas: [0.95, 0.999]
- adam_eps: 1.0e-8
- adam_weight_decay: 1.0e-6
- online_steps_between_rollouts: 1
-
- # VQ-BeT specific
- vqvae_lr: 1.0e-3
- n_vqvae_training_steps: 20000
- bet_weight_decay: 2e-4
- bet_learning_rate: 5.5e-5
- bet_betas: [0.9, 0.999]
-
- delta_timestamps:
- observation.image: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]"
- observation.state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]"
- action: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, ${policy.n_action_pred_token} + ${policy.action_chunk_size} - 1)]"
-
-eval:
- n_episodes: 50
- batch_size: 50
-
-policy:
- name: vqbet
-
- # Input / output structure.
- n_obs_steps: 5
- n_action_pred_token: 7
- action_chunk_size: 5
-
- input_shapes:
- # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
- observation.image: [3, 96, 96]
- observation.state: ["${env.state_dim}"]
- output_shapes:
- action: ["${env.action_dim}"]
-
- # Normalization / Unnormalization
- input_normalization_modes:
- observation.image: mean_std
- observation.state: min_max
- output_normalization_modes:
- action: min_max
-
- # Architecture / modeling.
- # Vision backbone.
- vision_backbone: resnet18
- crop_shape: [84, 84]
- crop_is_random: True
- pretrained_backbone_weights: null
- use_group_norm: True
- spatial_softmax_num_keypoints: 32
- # VQ-VAE
- n_vqvae_training_steps: ${training.n_vqvae_training_steps}
- vqvae_n_embed: 16
- vqvae_embedding_dim: 256
- vqvae_enc_hidden_dim: 128
- # VQ-BeT
- gpt_block_size: 500
- gpt_input_dim: 512
- gpt_output_dim: 512
- gpt_n_layer: 8
- gpt_n_head: 8
- gpt_hidden_dim: 512
- dropout: 0.1
- mlp_hidden_dim: 1024
- offset_loss_weight: 10000.
- primary_code_loss_weight: 5.0
- secondary_code_loss_weight: 0.5
- bet_softmax_temperature: 0.1
- sequentially_select: False
diff --git a/lerobot/configs/robot/aloha.yaml b/lerobot/configs/robot/aloha.yaml
deleted file mode 100644
index d8bca515f0..0000000000
--- a/lerobot/configs/robot/aloha.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
-# [Aloha: A Low-Cost Hardware for Bimanual Teleoperation](https://www.trossenrobotics.com/aloha-stationary)
-# https://aloha-2.github.io
-
-# Requires installing extras packages
-# With pip: `pip install -e ".[dynamixel intelrealsense]"`
-# With poetry: `poetry install --sync --extras "dynamixel intelrealsense"`
-
-# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/9_use_aloha.md)
-
-
-_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot
-robot_type: aloha
-# Specific to Aloha, LeRobot comes with default calibration files. Assuming the motors have been
-# properly assembled, no manual calibration step is expected. If you need to run manual calibration,
-# simply update this path to ".cache/calibration/aloha"
-calibration_dir: .cache/calibration/aloha_default
-
-# /!\ FOR SAFETY, READ THIS /!\
-# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
-# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
-# the number of motors in your follower arms.
-# For Aloha, for every goal position request, motor rotations are capped at 5 degrees by default.
-# When you feel more confident with teleoperation or running the policy, you can extend
-# this safety limit and even remove it by setting it to `null`.
-# Also, everything is expected to work safely out-of-the-box, but we highly advise
-# first teleoperating the grippers only (by commenting out the rest of the motors in this yaml),
-# then gradually adding more motors (by uncommenting), until you can teleoperate both arms fully.
-max_relative_target: 5
-
-leader_arms:
- left:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/ttyDXL_leader_left
- motors: # window_x
- # name: (index, model)
- waist: [1, xm430-w350]
- shoulder: [2, xm430-w350]
- shoulder_shadow: [3, xm430-w350]
- elbow: [4, xm430-w350]
- elbow_shadow: [5, xm430-w350]
- forearm_roll: [6, xm430-w350]
- wrist_angle: [7, xm430-w350]
- wrist_rotate: [8, xl430-w250]
- gripper: [9, xc430-w150]
- right:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/ttyDXL_leader_right
- motors: # window_x
- # name: (index, model)
- waist: [1, xm430-w350]
- shoulder: [2, xm430-w350]
- shoulder_shadow: [3, xm430-w350]
- elbow: [4, xm430-w350]
- elbow_shadow: [5, xm430-w350]
- forearm_roll: [6, xm430-w350]
- wrist_angle: [7, xm430-w350]
- wrist_rotate: [8, xl430-w250]
- gripper: [9, xc430-w150]
-
-follower_arms:
- left:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/ttyDXL_follower_left
- motors:
- # name: [index, model]
- waist: [1, xm540-w270]
- shoulder: [2, xm540-w270]
- shoulder_shadow: [3, xm540-w270]
- elbow: [4, xm540-w270]
- elbow_shadow: [5, xm540-w270]
- forearm_roll: [6, xm540-w270]
- wrist_angle: [7, xm540-w270]
- wrist_rotate: [8, xm430-w350]
- gripper: [9, xm430-w350]
- right:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/ttyDXL_follower_right
- motors:
- # name: [index, model]
- waist: [1, xm540-w270]
- shoulder: [2, xm540-w270]
- shoulder_shadow: [3, xm540-w270]
- elbow: [4, xm540-w270]
- elbow_shadow: [5, xm540-w270]
- forearm_roll: [6, xm540-w270]
- wrist_angle: [7, xm540-w270]
- wrist_rotate: [8, xm430-w350]
- gripper: [9, xm430-w350]
-
-# Troubleshooting: If one of your IntelRealSense cameras freezes during
-# data recording due to a bandwidth limit, you might need to plug the camera
-# into another USB hub or PCIe card.
-cameras:
- cam_high:
- _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera
- serial_number: 128422271347
- fps: 30
- width: 640
- height: 480
- cam_low:
- _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera
- serial_number: 130322270656
- fps: 30
- width: 640
- height: 480
- cam_left_wrist:
- _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera
- serial_number: 218622272670
- fps: 30
- width: 640
- height: 480
- cam_right_wrist:
- _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera
- serial_number: 130322272300
- fps: 30
- width: 640
- height: 480
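
To make the `max_relative_target` safety note above concrete, here is an illustrative clamp of the kind it implies; this is a sketch, not ManipulatorRobot's actual code, and the unit of the cap depends on how the robot class interprets it.

```python
import numpy as np


def clamp_goal(present_pos: np.ndarray, goal_pos: np.ndarray, max_relative_target: float) -> np.ndarray:
    # Limit how far each motor's goal may deviate from its present position in one step.
    step = np.clip(goal_pos - present_pos, -max_relative_target, max_relative_target)
    return present_pos + step
```
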
diff --git a/lerobot/configs/robot/koch.yaml b/lerobot/configs/robot/koch.yaml
deleted file mode 100644
index 40969dc73d..0000000000
--- a/lerobot/configs/robot/koch.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot
-robot_type: koch
-calibration_dir: .cache/calibration/koch
-
-# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
-# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
-# the number of motors in your follower arms.
-max_relative_target: null
-
-leader_arms:
- main:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/tty.usbmodem575E0031751
- motors:
- # name: (index, model)
- shoulder_pan: [1, "xl330-m077"]
- shoulder_lift: [2, "xl330-m077"]
- elbow_flex: [3, "xl330-m077"]
- wrist_flex: [4, "xl330-m077"]
- wrist_roll: [5, "xl330-m077"]
- gripper: [6, "xl330-m077"]
-
-follower_arms:
- main:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/tty.usbmodem575E0032081
- motors:
- # name: (index, model)
- shoulder_pan: [1, "xl430-w250"]
- shoulder_lift: [2, "xl430-w250"]
- elbow_flex: [3, "xl330-m288"]
- wrist_flex: [4, "xl330-m288"]
- wrist_roll: [5, "xl330-m288"]
- gripper: [6, "xl330-m288"]
-
-cameras:
- laptop:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: 0
- fps: 30
- width: 640
- height: 480
- phone:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: 1
- fps: 30
- width: 640
- height: 480
-
-# ~ Koch specific settings ~
-# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible
-# to squeeze the gripper and have it spring back to an open position on its own.
-gripper_open_degree: 35.156
diff --git a/lerobot/configs/robot/koch_bimanual.yaml b/lerobot/configs/robot/koch_bimanual.yaml
deleted file mode 100644
index b551d15de9..0000000000
--- a/lerobot/configs/robot/koch_bimanual.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot
-robot_type: koch_bimanual
-calibration_dir: .cache/calibration/koch_bimanual
-
-# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
-# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
-# the number of motors in your follower arms.
-max_relative_target: null
-
-leader_arms:
- left:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/tty.usbmodem585A0085511
- motors:
- # name: (index, model)
- shoulder_pan: [1, "xl330-m077"]
- shoulder_lift: [2, "xl330-m077"]
- elbow_flex: [3, "xl330-m077"]
- wrist_flex: [4, "xl330-m077"]
- wrist_roll: [5, "xl330-m077"]
- gripper: [6, "xl330-m077"]
- right:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/tty.usbmodem575E0031751
- motors:
- # name: (index, model)
- shoulder_pan: [1, "xl330-m077"]
- shoulder_lift: [2, "xl330-m077"]
- elbow_flex: [3, "xl330-m077"]
- wrist_flex: [4, "xl330-m077"]
- wrist_roll: [5, "xl330-m077"]
- gripper: [6, "xl330-m077"]
-
-follower_arms:
- left:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/tty.usbmodem585A0076891
- motors:
- # name: (index, model)
- shoulder_pan: [1, "xl430-w250"]
- shoulder_lift: [2, "xl430-w250"]
- elbow_flex: [3, "xl330-m288"]
- wrist_flex: [4, "xl330-m288"]
- wrist_roll: [5, "xl330-m288"]
- gripper: [6, "xl330-m288"]
- right:
- _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
- port: /dev/tty.usbmodem575E0032081
- motors:
- # name: (index, model)
- shoulder_pan: [1, "xl430-w250"]
- shoulder_lift: [2, "xl430-w250"]
- elbow_flex: [3, "xl330-m288"]
- wrist_flex: [4, "xl330-m288"]
- wrist_roll: [5, "xl330-m288"]
- gripper: [6, "xl330-m288"]
-
-cameras:
- laptop:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: 0
- fps: 30
- width: 640
- height: 480
- phone:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: 1
- fps: 30
- width: 640
- height: 480
-
-# ~ Koch specific settings ~
-# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible
-# to squeeze the gripper and have it spring back to an open position on its own.
-gripper_open_degree: 35.156
diff --git a/lerobot/configs/robot/moss.yaml b/lerobot/configs/robot/moss.yaml
deleted file mode 100644
index 8a9019851b..0000000000
--- a/lerobot/configs/robot/moss.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# [Moss v1 robot arm](https://github.com/jess-moss/moss-robot-arms)
-
-# Requires installing extras packages
-# With pip: `pip install -e ".[feetech]"`
-# With poetry: `poetry install --sync --extras "feetech"`
-
-# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/11_use_moss.md)
-
-_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot
-robot_type: moss
-calibration_dir: .cache/calibration/moss
-
-# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
-# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
-# the number of motors in your follower arms.
-max_relative_target: null
-
-leader_arms:
- main:
- _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus
- port: /dev/tty.usbmodem58760431091
- motors:
- # name: (index, model)
- shoulder_pan: [1, "sts3215"]
- shoulder_lift: [2, "sts3215"]
- elbow_flex: [3, "sts3215"]
- wrist_flex: [4, "sts3215"]
- wrist_roll: [5, "sts3215"]
- gripper: [6, "sts3215"]
-
-follower_arms:
- main:
- _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus
- port: /dev/tty.usbmodem58760431191
- motors:
- # name: (index, model)
- shoulder_pan: [1, "sts3215"]
- shoulder_lift: [2, "sts3215"]
- elbow_flex: [3, "sts3215"]
- wrist_flex: [4, "sts3215"]
- wrist_roll: [5, "sts3215"]
- gripper: [6, "sts3215"]
-
-cameras:
- laptop:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: 0
- fps: 30
- width: 640
- height: 480
- phone:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: 1
- fps: 30
- width: 640
- height: 480
diff --git a/lerobot/configs/robot/stretch.yaml b/lerobot/configs/robot/stretch.yaml
deleted file mode 100644
index e29966b6f3..0000000000
--- a/lerobot/configs/robot/stretch.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# [Stretch3 from Hello Robot](https://hello-robot.com/stretch-3-product)
-
-# Requires installing extras packages
-# With pip: `pip install -e ".[stretch]"`
-# With poetry: `poetry install --sync --extras "stretch"`
-
-# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/8_use_stretch.md)
-
-
-_target_: lerobot.common.robot_devices.robots.stretch.StretchRobot
-robot_type: stretch3
-
-cameras:
- navigation:
- _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
- camera_index: /dev/hello-nav-head-camera
- fps: 10
- width: 1280
- height: 720
- rotation: -90
- head:
- _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera.init_from_name
- name: Intel RealSense D435I
- fps: 30
- width: 640
- height: 480
- rotation: 90
- wrist:
- _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera.init_from_name
- name: Intel RealSense D405
- fps: 30
- width: 640
- height: 480
diff --git a/lerobot/scripts/configure_motor.py b/lerobot/scripts/configure_motor.py
deleted file mode 100644
index f48b2740b7..0000000000
--- a/lerobot/scripts/configure_motor.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""
-This script configures a single motor at a time, setting it to a given ID and baudrate.
-
-Example of usage:
-```bash
-python lerobot/scripts/configure_motor.py \
- --port /dev/tty.usbmodem585A0080521 \
- --brand feetech \
- --model sts3215 \
- --baudrate 1000000 \
- --ID 1
-```
-"""
-
-import argparse
-import time
-
-
-def configure_motor(port, brand, model, motor_idx_des, baudrate_des):
- if brand == "feetech":
- from lerobot.common.robot_devices.motors.feetech import MODEL_BAUDRATE_TABLE
- from lerobot.common.robot_devices.motors.feetech import (
- SCS_SERIES_BAUDRATE_TABLE as SERIES_BAUDRATE_TABLE,
- )
- from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus as MotorsBusClass
- elif brand == "dynamixel":
- from lerobot.common.robot_devices.motors.dynamixel import MODEL_BAUDRATE_TABLE
- from lerobot.common.robot_devices.motors.dynamixel import (
- X_SERIES_BAUDRATE_TABLE as SERIES_BAUDRATE_TABLE,
- )
- from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus as MotorsBusClass
- else:
- raise ValueError(
- f"Currently we do not support this motor brand: {brand}. We currently support feetech and dynamixel motors."
- )
-
- # Check if the provided model exists in the model_baud_rate_table
- if model not in MODEL_BAUDRATE_TABLE:
- raise ValueError(
- f"Invalid model '{model}' for brand '{brand}'. Supported models: {list(MODEL_BAUDRATE_TABLE.keys())}"
- )
-
- # Setup motor names, indices, and models
- motor_name = "motor"
- motor_index_arbitrary = motor_idx_des # Use the motor ID passed via argument
- motor_model = model # Use the motor model passed via argument
-
- # Initialize the MotorBus with the correct port and motor configurations
- motor_bus = MotorsBusClass(port=port, motors={motor_name: (motor_index_arbitrary, motor_model)})
-
- # Try to connect to the motor bus and handle any connection-specific errors
- try:
- motor_bus.connect()
- print(f"Connected on port {motor_bus.port}")
- except OSError as e:
- print(f"Error occurred when connecting to the motor bus: {e}")
- return
-
- # Motor bus is connected, proceed with the rest of the operations
- try:
- print("Scanning all baudrates and motor indices")
- all_baudrates = set(SERIES_BAUDRATE_TABLE.values())
- motor_index = -1 # Set the motor index to an out-of-range value.
-
- for baudrate in all_baudrates:
- motor_bus.set_bus_baudrate(baudrate)
- present_ids = motor_bus.find_motor_indices(list(range(1, 10)))
- if len(present_ids) > 1:
- raise ValueError(
- "Error: More than one motor ID detected. This script is designed to only handle one motor at a time. Please disconnect all but one motor."
- )
-
- if len(present_ids) == 1:
- if motor_index != -1:
- raise ValueError(
- "Error: More than one motor ID detected. This script is designed to only handle one motor at a time. Please disconnect all but one motor."
- )
- motor_index = present_ids[0]
- break
-
- if motor_index == -1:
- raise ValueError("No motors detected. Please ensure you have one motor connected.")
-
- print(f"Motor index found at: {motor_index}")
-
- if brand == "feetech":
- # Allows ID and BAUDRATE to be written in memory
- motor_bus.write_with_motor_ids(motor_bus.motor_models, motor_index, "Lock", 0)
-
- if baudrate != baudrate_des:
- print(f"Setting its baudrate to {baudrate_des}")
- baudrate_idx = list(SERIES_BAUDRATE_TABLE.values()).index(baudrate_des)
-
- # The write can fail, so we allow retries
- motor_bus.write_with_motor_ids(motor_bus.motor_models, motor_index, "Baud_Rate", baudrate_idx)
- time.sleep(0.5)
- motor_bus.set_bus_baudrate(baudrate_des)
- present_baudrate_idx = motor_bus.read_with_motor_ids(
- motor_bus.motor_models, motor_index, "Baud_Rate", num_retry=2
- )
-
- if present_baudrate_idx != baudrate_idx:
- raise OSError("Failed to write baudrate.")
-
- print(f"Setting its index to desired index {motor_idx_des}")
- if brand == "feetech":
- motor_bus.write_with_motor_ids(motor_bus.motor_models, motor_index, "Lock", 0)
- motor_bus.write_with_motor_ids(motor_bus.motor_models, motor_index, "ID", motor_idx_des)
-
- present_idx = motor_bus.read_with_motor_ids(motor_bus.motor_models, motor_idx_des, "ID", num_retry=2)
- if present_idx != motor_idx_des:
- raise OSError("Failed to write index.")
-
- if brand == "feetech":
- # Set Maximum_Acceleration to 254 to speed up acceleration and deceleration of
- # the motors. Note: this configuration is not in the official STS3215 Memory Table
- motor_bus.write("Lock", 0)
- motor_bus.write("Maximum_Acceleration", 254)
-
- motor_bus.write("Goal_Position", 2048)
- time.sleep(4)
- print("Present Position", motor_bus.read("Present_Position"))
-
- motor_bus.write("Offset", 0)
- time.sleep(4)
- print("Offset", motor_bus.read("Offset"))
-
- except Exception as e:
- print(f"Error occurred during motor configuration: {e}")
-
- finally:
- motor_bus.disconnect()
- print("Disconnected from motor bus.")
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--port", type=str, required=True, help="Motors bus port (e.g. /dev/tty.usbmodem585A0080521)")
- parser.add_argument("--brand", type=str, required=True, help="Motor brand (e.g. dynamixel,feetech)")
- parser.add_argument("--model", type=str, required=True, help="Motor model (e.g. xl330-m077,sts3215)")
- parser.add_argument("--ID", type=int, required=True, help="Desired ID of the current motor (e.g. 1,2,3)")
- parser.add_argument(
- "--baudrate", type=int, default=1000000, help="Desired baudrate for the motor (default: 1000000)"
- )
- args = parser.parse_args()
-
- configure_motor(args.port, args.brand, args.model, args.ID, args.baudrate)
diff --git a/lerobot/scripts/control_robot.py b/lerobot/scripts/control_robot.py
deleted file mode 100644
index 30f22cb004..0000000000
--- a/lerobot/scripts/control_robot.py
+++ /dev/null
@@ -1,575 +0,0 @@
-"""
-Utilities to control a robot.
-
-Useful to record a dataset, replay a recorded episode, run the policy on your robot
-and record an evaluation dataset, and to recalibrate your robot if needed.
-
-Examples of usage:
-
-- Recalibrate your robot:
-```bash
-python lerobot/scripts/control_robot.py calibrate
-```
-
-- Unlimited teleoperation at the highest frequency (~200 Hz is expected); exit with CTRL+C:
-```bash
-python lerobot/scripts/control_robot.py teleoperate
-
-# Remove the cameras from the robot definition. They are not used in 'teleoperate' anyway.
-python lerobot/scripts/control_robot.py teleoperate --robot-overrides '~cameras'
-```
-
-- Unlimited teleoperation at a limited frequency of 30 Hz, to simulate data recording frequency:
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --fps 30
-```
-
-- Record one episode in order to test replay:
-```bash
-python lerobot/scripts/control_robot.py record \
- --fps 30 \
- --repo-id $USER/koch_test \
- --num-episodes 1 \
- --run-compute-stats 0
-```
-
-- Visualize dataset:
-```bash
-python lerobot/scripts/visualize_dataset.py \
- --repo-id $USER/koch_test \
- --episode-index 0
-```
-
-- Replay this test episode:
-```bash
-python lerobot/scripts/control_robot.py replay \
- --fps 30 \
- --repo-id $USER/koch_test \
- --episode 0
-```
-
-- Record a full dataset in order to train a policy, with 2 seconds of warmup,
-30 seconds of recording for each episode, and 10 seconds to reset the environment in between episodes:
-```bash
-python lerobot/scripts/control_robot.py record \
- --fps 30 \
- --repo-id $USER/koch_pick_place_lego \
- --num-episodes 50 \
- --warmup-time-s 2 \
- --episode-time-s 30 \
- --reset-time-s 10
-```
-
-**NOTE**: You can use your keyboard to control data recording flow.
-- Tap right arrow key '->' to exit early while recording an episode and go to resetting the environment.
-- Tap right arrow key '->' to exit early while resetting the environment and go to recording the next episode.
-- Tap left arrow key '<-' to exit early and re-record the current episode.
-- Tap escape key 'esc' to stop the data recording.
-This might require sudo permissions to allow your terminal to monitor keyboard events.
-
-**NOTE**: You can resume/continue data recording by running the same data recording command and adding `--resume 1`.
-If the dataset you want to extend is not on the hub, you also need to add `--local-files-only 1`.
-
-- Train on this dataset with the ACT policy:
-```bash
-python lerobot/scripts/train.py \
- policy=act_koch_real \
- env=koch_real \
- dataset_repo_id=$USER/koch_pick_place_lego \
- hydra.run.dir=outputs/train/act_koch_real
-```
-
-- Run the pretrained policy on the robot:
-```bash
-python lerobot/scripts/control_robot.py record \
- --fps 30 \
- --repo-id $USER/eval_act_koch_real \
- --num-episodes 10 \
- --warmup-time-s 2 \
- --episode-time-s 30 \
- --reset-time-s 10 \
- -p outputs/train/act_koch_real/checkpoints/080000/pretrained_model
-```
-"""
-
-import argparse
-import logging
-import time
-from pathlib import Path
-from typing import List
-
-# from safetensors.torch import load_file, save_file
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.common.robot_devices.control_utils import (
- control_loop,
- has_method,
- init_keyboard_listener,
- init_policy,
- log_control_info,
- record_episode,
- reset_environment,
- sanity_check_dataset_name,
- sanity_check_dataset_robot_compatibility,
- stop_recording,
- warmup_record,
-)
-from lerobot.common.robot_devices.robots.factory import make_robot
-from lerobot.common.robot_devices.robots.utils import Robot
-from lerobot.common.robot_devices.utils import busy_wait, safe_disconnect
-from lerobot.common.utils.utils import init_hydra_config, init_logging, log_say, none_or_int
-
-########################################################################################
-# Control modes
-########################################################################################
-
-
-@safe_disconnect
-def calibrate(robot: Robot, arms: list[str] | None):
- # TODO(aliberts): move this code in robots' classes
- if robot.robot_type.startswith("stretch"):
- if not robot.is_connected:
- robot.connect()
- if not robot.is_homed():
- robot.home()
- return
-
- if arms is None:
- arms = robot.available_arms
-
- unknown_arms = [arm_id for arm_id in arms if arm_id not in robot.available_arms]
- available_arms_str = " ".join(robot.available_arms)
- unknown_arms_str = " ".join(unknown_arms)
-
- if arms is None or len(arms) == 0:
- raise ValueError(
- "No arm provided. Use `--arms` as argument with one or more available arms.\n"
- f"For instance, to recalibrate all arms add: `--arms {available_arms_str}`"
- )
-
- if len(unknown_arms) > 0:
- raise ValueError(
- f"Unknown arms provided ('{unknown_arms_str}'). Available arms are `{available_arms_str}`."
- )
-
- for arm_id in arms:
- arm_calib_path = robot.calibration_dir / f"{arm_id}.json"
- if arm_calib_path.exists():
- print(f"Removing '{arm_calib_path}'")
- arm_calib_path.unlink()
- else:
- print(f"Calibration file not found '{arm_calib_path}'")
-
- if robot.is_connected:
- robot.disconnect()
-
- # Calling `connect` automatically runs calibration
- # when the calibration file is missing
- robot.connect()
- robot.disconnect()
- print("Calibration is done! You can now teleoperate and record datasets!")
-
-
-@safe_disconnect
-def teleoperate(
- robot: Robot, fps: int | None = None, teleop_time_s: float | None = None, display_cameras: bool = False
-):
- control_loop(
- robot,
- control_time_s=teleop_time_s,
- fps=fps,
- teleoperate=True,
- display_cameras=display_cameras,
- )
-
-
-@safe_disconnect
-def record(
- robot: Robot,
- root: Path,
- repo_id: str,
- single_task: str,
- pretrained_policy_name_or_path: str | None = None,
- policy_overrides: List[str] | None = None,
- fps: int | None = None,
- warmup_time_s: int | float = 2,
- episode_time_s: int | float = 10,
- reset_time_s: int | float = 5,
- num_episodes: int = 50,
- video: bool = True,
- run_compute_stats: bool = True,
- push_to_hub: bool = True,
- tags: list[str] | None = None,
- num_image_writer_processes: int = 0,
- num_image_writer_threads_per_camera: int = 4,
- display_cameras: bool = True,
- play_sounds: bool = True,
- resume: bool = False,
- # TODO(rcadene, aliberts): remove local_files_only when refactor with dataset as argument
- local_files_only: bool = False,
-) -> LeRobotDataset:
- # TODO(rcadene): Add option to record logs
- listener = None
- events = None
- policy = None
- device = None
- use_amp = None
-
- if single_task:
- task = single_task
- else:
- raise NotImplementedError("Only single-task recording is supported for now")
-
- # Load pretrained policy
- if pretrained_policy_name_or_path is not None:
- policy, policy_fps, device, use_amp = init_policy(pretrained_policy_name_or_path, policy_overrides)
-
- if fps is None:
- fps = policy_fps
- logging.warning(f"No fps provided, so using the fps from policy config ({policy_fps}).")
- elif fps != policy_fps:
- logging.warning(
- f"There is a mismatch between the provided fps ({fps}) and the one from policy config ({policy_fps})."
- )
-
- if resume:
- dataset = LeRobotDataset(
- repo_id,
- root=root,
- local_files_only=local_files_only,
- )
- dataset.start_image_writer(
- num_processes=num_image_writer_processes,
- num_threads=num_image_writer_threads_per_camera * len(robot.cameras),
- )
- sanity_check_dataset_robot_compatibility(dataset, robot, fps, video)
- else:
- # Create empty dataset or load existing saved episodes
- sanity_check_dataset_name(repo_id, policy)
- dataset = LeRobotDataset.create(
- repo_id,
- fps,
- root=root,
- robot=robot,
- use_videos=video,
- image_writer_processes=num_image_writer_processes,
- image_writer_threads=num_image_writer_threads_per_camera * len(robot.cameras),
- )
-
- if not robot.is_connected:
- robot.connect()
-
- listener, events = init_keyboard_listener()
-
- # Execute a few seconds without recording to:
- # 1. teleoperate the robot to move it to the starting position if no policy is provided,
- # 2. give time to the robot devices to connect and start synchronizing,
- # 3. place the camera windows on screen
- enable_teleoperation = policy is None
- log_say("Warmup record", play_sounds)
- warmup_record(robot, events, enable_teleoperation, warmup_time_s, display_cameras, fps)
-
- if has_method(robot, "teleop_safety_stop"):
- robot.teleop_safety_stop()
-
- recorded_episodes = 0
- while True:
- if recorded_episodes >= num_episodes:
- break
-
- # TODO(aliberts): add task prompt for multitask here. Might need to temporarily disable events if
- # input() messes with them.
- # if multi_task:
- # task = input("Enter your task description: ")
-
- log_say(f"Recording episode {dataset.num_episodes}", play_sounds)
- record_episode(
- dataset=dataset,
- robot=robot,
- events=events,
- episode_time_s=episode_time_s,
- display_cameras=display_cameras,
- policy=policy,
- device=device,
- use_amp=use_amp,
- fps=fps,
- )
-
- # Execute a few seconds without recording to give time to manually reset the environment
- # Current code logic doesn't allow teleoperation during this time.
- # TODO(rcadene): add an option to enable teleoperation during reset
- # Skip reset for the last episode to be recorded
- if not events["stop_recording"] and (
- (recorded_episodes < num_episodes - 1) or events["rerecord_episode"]
- ):
- log_say("Reset the environment", play_sounds)
- reset_environment(robot, events, reset_time_s)
-
- if events["rerecord_episode"]:
- log_say("Re-record episode", play_sounds)
- events["rerecord_episode"] = False
- events["exit_early"] = False
- dataset.clear_episode_buffer()
- continue
-
- dataset.save_episode(task)
- recorded_episodes += 1
-
- if events["stop_recording"]:
- break
-
- log_say("Stop recording", play_sounds, blocking=True)
- stop_recording(robot, listener, display_cameras)
-
- if run_compute_stats:
- logging.info("Computing dataset statistics")
-
- dataset.consolidate(run_compute_stats)
-
- if push_to_hub:
- dataset.push_to_hub(tags=tags)
-
- log_say("Exiting", play_sounds)
- return dataset
-
-
-@safe_disconnect
-def replay(
- robot: Robot,
- root: Path,
- repo_id: str,
- episode: int,
- fps: int | None = None,
- play_sounds: bool = True,
- local_files_only: bool = False,
-):
- # TODO(rcadene, aliberts): refactor with control_loop, once `dataset` is an instance of LeRobotDataset
- # TODO(rcadene): Add option to record logs
-
- dataset = LeRobotDataset(repo_id, root=root, episodes=[episode], local_files_only=local_files_only)
- actions = dataset.hf_dataset.select_columns("action")
-
- if not robot.is_connected:
- robot.connect()
-
- log_say("Replaying episode", play_sounds, blocking=True)
- for idx in range(dataset.num_frames):
- start_episode_t = time.perf_counter()
-
- action = actions[idx]["action"]
- robot.send_action(action)
-
- dt_s = time.perf_counter() - start_episode_t
- busy_wait(1 / fps - dt_s)
-
- dt_s = time.perf_counter() - start_episode_t
- log_control_info(robot, dt_s, fps=fps)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- subparsers = parser.add_subparsers(dest="mode", required=True)
-
- # Set common options for all the subparsers
- base_parser = argparse.ArgumentParser(add_help=False)
- base_parser.add_argument(
- "--robot-path",
- type=str,
- default="lerobot/configs/robot/koch.yaml",
- help="Path to robot yaml file used to instantiate the robot using `make_robot` factory function.",
- )
- base_parser.add_argument(
- "--robot-overrides",
- type=str,
- nargs="*",
- help="Any key=value arguments to override config values (use dots for.nested=overrides)",
- )
-
- parser_calib = subparsers.add_parser("calibrate", parents=[base_parser])
- parser_calib.add_argument(
- "--arms",
- type=str,
- nargs="*",
- help="List of arms to calibrate (e.g. `--arms left_follower right_follower left_leader`)",
- )
-
- parser_teleop = subparsers.add_parser("teleoperate", parents=[base_parser])
- parser_teleop.add_argument(
- "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)"
- )
- parser_teleop.add_argument(
- "--display-cameras",
- type=int,
- default=1,
- help="Display all cameras on screen (set to 1 to display or 0).",
- )
-
- parser_record = subparsers.add_parser("record", parents=[base_parser])
- task_args = parser_record.add_mutually_exclusive_group(required=True)
- parser_record.add_argument(
- "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)"
- )
- task_args.add_argument(
- "--single-task",
- type=str,
- help="A short but accurate description of the task performed during the recording.",
- )
- # TODO(aliberts): add multi-task support
- # task_args.add_argument(
- # "--multi-task",
- # type=int,
- # help="You will need to enter the task performed at the start of each episode.",
- # )
- parser_record.add_argument(
- "--root",
- type=Path,
- default=None,
- help="Root directory where the dataset will be stored (e.g. 'dataset/path').",
- )
- parser_record.add_argument(
- "--repo-id",
- type=str,
- default="lerobot/test",
- help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).",
- )
- parser_record.add_argument(
- "--local-files-only",
- type=int,
- default=0,
- help="Use local files only. By default, this script will try to fetch the dataset from the hub if it exists.",
- )
- parser_record.add_argument(
- "--warmup-time-s",
- type=int,
- default=10,
- help="Number of seconds before starting data collection. It allows the robot devices to warmup and synchronize.",
- )
- parser_record.add_argument(
- "--episode-time-s",
- type=int,
- default=60,
- help="Number of seconds for data recording for each episode.",
- )
- parser_record.add_argument(
- "--reset-time-s",
- type=int,
- default=60,
- help="Number of seconds for resetting the environment after each episode.",
- )
- parser_record.add_argument("--num-episodes", type=int, default=50, help="Number of episodes to record.")
- parser_record.add_argument(
- "--run-compute-stats",
- type=int,
- default=1,
- help="By default, run the computation of the data statistics at the end of data collection. Compute intensive and not required to just replay an episode.",
- )
- parser_record.add_argument(
- "--push-to-hub",
- type=int,
- default=1,
- help="Upload dataset to Hugging Face hub.",
- )
- parser_record.add_argument(
- "--tags",
- type=str,
- nargs="*",
- help="Add tags to your dataset on the hub.",
- )
- parser_record.add_argument(
- "--num-image-writer-processes",
- type=int,
- default=0,
- help=(
- "Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only; "
- "set to ≥1 to use subprocesses, each using threads to write images. The best number of processes "
- "and threads depends on your system. We recommend 4 threads per camera with 0 processes. "
- "If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses."
- ),
- )
- parser_record.add_argument(
- "--num-image-writer-threads-per-camera",
- type=int,
- default=4,
- help=(
- "Number of threads writing the frames as png images on disk, per camera. "
- "Too many threads might cause unstable teleoperation fps due to main thread being blocked. "
- "Not enough threads might cause low camera fps."
- ),
- )
- parser_record.add_argument(
- "--resume",
- type=int,
- default=0,
- help="Resume recording on an existing dataset.",
- )
- parser_record.add_argument(
- "-p",
- "--pretrained-policy-name-or-path",
- type=str,
- help=(
- "Either the repo ID of a model hosted on the Hub or a path to a directory containing weights "
- "saved using `Policy.save_pretrained`."
- ),
- )
- parser_record.add_argument(
- "--policy-overrides",
- type=str,
- nargs="*",
- help="Any key=value arguments to override config values (use dots for.nested=overrides)",
- )
-
- parser_replay = subparsers.add_parser("replay", parents=[base_parser])
- parser_replay.add_argument(
- "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)"
- )
- parser_replay.add_argument(
- "--root",
- type=Path,
- default=None,
- help="Root directory where the dataset will be stored (e.g. 'dataset/path').",
- )
- parser_replay.add_argument(
- "--repo-id",
- type=str,
- default="lerobot/test",
- help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).",
- )
- parser_replay.add_argument(
- "--local-files-only",
- type=int,
- default=0,
- help="Use local files only. By default, this script will try to fetch the dataset from the hub if it exists.",
- )
- parser_replay.add_argument("--episode", type=int, default=0, help="Index of the episode to replay.")
-
- args = parser.parse_args()
-
- init_logging()
-
- control_mode = args.mode
- robot_path = args.robot_path
- robot_overrides = args.robot_overrides
- kwargs = vars(args)
- del kwargs["mode"]
- del kwargs["robot_path"]
- del kwargs["robot_overrides"]
-
- robot_cfg = init_hydra_config(robot_path, robot_overrides)
- robot = make_robot(robot_cfg)
-
- if control_mode == "calibrate":
- calibrate(robot, **kwargs)
-
- elif control_mode == "teleoperate":
- teleoperate(robot, **kwargs)
-
- elif control_mode == "record":
- record(robot, **kwargs)
-
- elif control_mode == "replay":
- replay(robot, **kwargs)
-
- if robot.is_connected:
- # Disconnect manually to avoid a "Core dump" during process
- # termination due to camera threads not properly exiting.
- robot.disconnect()
diff --git a/lerobot/scripts/control_sim_robot.py b/lerobot/scripts/control_sim_robot.py
deleted file mode 100644
index 4fffa8c754..0000000000
--- a/lerobot/scripts/control_sim_robot.py
+++ /dev/null
@@ -1,546 +0,0 @@
-"""
-Utilities to control a robot in simulation.
-
-Useful to record a dataset, replay a recorded episode and record an evaluation dataset.
-
-Examples of usage:
-
-
-- Unlimited teleoperation at a limited frequency of 30 Hz, to simulate data recording frequency.
- You can modify this value depending on how fast your simulation can run:
-```bash
-python lerobot/scripts/control_robot.py teleoperate \
- --fps 30 \
- --robot-path lerobot/configs/robot/your_robot_config.yaml \
- --sim-config lerobot/configs/env/your_sim_config.yaml
-```
-
-- Record one episode in order to test replay:
-```bash
-python lerobot/scripts/control_sim_robot.py record \
- --robot-path lerobot/configs/robot/your_robot_config.yaml \
- --sim-config lerobot/configs/env/your_sim_config.yaml \
- --fps 30 \
- --repo-id $USER/robot_sim_test \
- --num-episodes 1 \
- --run-compute-stats 0
-```
-
-Add `--push-to-hub 1` to push the recorded dataset to the Hugging Face hub.
-
-- Visualize dataset:
-```bash
-python lerobot/scripts/visualize_dataset.py \
- --repo-id $USER/robot_sim_test \
- --episode-index 0
-```
-
-- Replay a sequence of test episodes:
-```bash
-python lerobot/scripts/control_sim_robot.py replay \
- --robot-path lerobot/configs/robot/your_robot_config.yaml \
- --sim-config lerobot/configs/env/your_sim_config.yaml \
- --fps 30 \
- --repo-id $USER/robot_sim_test \
- --episode 0
-```
-Note: the seed is saved, so during replay we can load the same environment state as during collection.
-
-- Record a full dataset in order to train a policy, with
-30 seconds of recording for each episode:
-```bash
-python lerobot/scripts/control_sim_robot.py record \
- --robot-path lerobot/configs/robot/your_robot_config.yaml \
- --sim-config lerobot/configs/env/your_sim_config.yaml \
- --fps 30 \
- --repo-id $USER/robot_sim_test \
- --num-episodes 50 \
- --episode-time-s 30
-```
-
-**NOTE**: You can use your keyboard to control data recording flow.
-- Tap right arrow key '->' to early exit while recording an episode and go to resetting the environment.
-- Tap right arrow key '->' to early exit while resetting the environment and go to recording the next episode.
-- Tap left arrow key '<-' to early exit and re-record the current episode.
-- Tap escape key 'esc' to stop the data recording.
-This might require sudo permissions to allow your terminal to monitor keyboard events.
-
-**NOTE**: You can resume/continue data recording by running the same data recording command again with `--resume 1`.
-"""
-
-import argparse
-import importlib
-import logging
-import time
-from pathlib import Path
-
-import cv2
-import gymnasium as gym
-import numpy as np
-import torch
-
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.common.robot_devices.control_utils import (
- init_keyboard_listener,
- init_policy,
- is_headless,
- log_control_info,
- predict_action,
- sanity_check_dataset_name,
- sanity_check_dataset_robot_compatibility,
- stop_recording,
-)
-from lerobot.common.robot_devices.robots.factory import make_robot
-from lerobot.common.robot_devices.robots.utils import Robot
-from lerobot.common.robot_devices.utils import busy_wait
-from lerobot.common.utils.utils import init_hydra_config, init_logging, log_say
-
-DEFAULT_FEATURES = {
- "next.reward": {
- "dtype": "float32",
- "shape": (1,),
- "names": None,
- },
- "next.success": {
- "dtype": "bool",
- "shape": (1,),
- "names": None,
- },
- "seed": {
- "dtype": "int64",
- "shape": (1,),
- "names": None,
- },
- "timestamp": {
- "dtype": "float32",
- "shape": (1,),
- "names": None,
- },
-}
-
-
-########################################################################################
-# Utilities
-########################################################################################
-def none_or_int(value):
- if value == "None":
- return None
- return int(value)
-
-
-def init_sim_calibration(robot, cfg):
- # Constants necessary for transforming the joint pos of the real robot to the sim
- # depending on the robot description used in that sim.
- start_pos = np.array(robot.leader_arms.main.calibration["start_pos"])
- axis_directions = np.array(cfg.get("axis_directions", [1]))
- offsets = np.array(cfg.get("offsets", [0])) * np.pi
-
- return {"start_pos": start_pos, "axis_directions": axis_directions, "offsets": offsets}
-
-
-def real_positions_to_sim(real_positions, axis_directions, start_pos, offsets):
- """Counts - starting position -> radians -> align axes -> offset"""
- return axis_directions * (real_positions - start_pos) * 2.0 * np.pi / 4096 + offsets
-
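-# Illustrative example (not part of the original script): with start_pos=2048, axis_direction=1
-# and offset=0, a raw reading of 3072 maps to (3072 - 2048) * 2 * np.pi / 4096 ≈ 1.57 rad,
-# i.e. a quarter turn of the joint.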
-
-########################################################################################
-# Control modes
-########################################################################################
-
-
-def teleoperate(env, robot: Robot, process_action_fn, teleop_time_s=None):
- env = env()
- env.reset()
- start_teleop_t = time.perf_counter()
- while True:
- leader_pos = robot.leader_arms.main.read("Present_Position")
- action = process_action_fn(leader_pos)
- env.step(np.expand_dims(action, 0))
- if teleop_time_s is not None and time.perf_counter() - start_teleop_t > teleop_time_s:
- print("Teleoperation processes finished.")
- break
-
-
-def record(
- env,
- robot: Robot,
- process_action_from_leader,
- root: Path,
- repo_id: str,
- task: str,
- fps: int | None = None,
- tags: list[str] | None = None,
- pretrained_policy_name_or_path: str = None,
- policy_overrides: bool | None = None,
- episode_time_s: int = 30,
- num_episodes: int = 50,
- video: bool = True,
- push_to_hub: bool = True,
- num_image_writer_processes: int = 0,
- num_image_writer_threads_per_camera: int = 4,
- display_cameras: bool = False,
- play_sounds: bool = True,
- resume: bool = False,
- local_files_only: bool = False,
- run_compute_stats: bool = True,
-) -> LeRobotDataset:
- # Load pretrained policy
- policy = None
- if pretrained_policy_name_or_path is not None:
- policy, policy_fps, device, use_amp = init_policy(pretrained_policy_name_or_path, policy_overrides)
-
- if fps is None:
- fps = policy_fps
- logging.warning(f"No fps provided, so using the fps from policy config ({policy_fps}).")
-
- if policy is None and process_action_from_leader is None:
- raise ValueError("Either policy or process_action_fn has to be set to enable control in sim.")
-
- # initialize listener before sim env
- listener, events = init_keyboard_listener()
-
- # create sim env
- env = env()
-
- # Create empty dataset or load existing saved episodes
- num_cameras = sum([1 if "image" in key else 0 for key in env.observation_space])
-
- # get image keys
- image_keys = [key for key in env.observation_space if "image" in key]
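- # Note: `env_cfg` here refers to the module-level variable set in the __main__ block below; it is not an argument of this function.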
- state_keys_dict = env_cfg.state_keys
-
- if resume:
- dataset = LeRobotDataset(
- repo_id,
- root=root,
- local_files_only=local_files_only,
- )
- dataset.start_image_writer(
- num_processes=num_image_writer_processes,
- num_threads=num_image_writer_threads_per_camera * num_cameras,
- )
- sanity_check_dataset_robot_compatibility(dataset, robot, fps, video)
- else:
- features = DEFAULT_FEATURES
- # add image keys to features
- for key in image_keys:
- shape = env.observation_space[key].shape
- if not key.startswith("observation.image."):
- key = "observation.image." + key
- features[key] = {"dtype": "video", "names": ["channel", "height", "width"], "shape": shape}
-
- for key, obs_key in state_keys_dict.items():
- features[key] = {
- "dtype": "float32",
- "names": None,
- "shape": env.observation_space[obs_key].shape,
- }
-
- features["action"] = {"dtype": "float32", "shape": env.action_space.shape, "names": None}
-
- # Create empty dataset or load existing saved episodes
- sanity_check_dataset_name(repo_id, policy)
- dataset = LeRobotDataset.create(
- repo_id,
- fps,
- root=root,
- features=features,
- use_videos=video,
- image_writer_processes=num_image_writer_processes,
- image_writer_threads=num_image_writer_threads_per_camera * num_cameras,
- )
-
- recorded_episodes = 0
- while True:
- log_say(f"Recording episode {dataset.num_episodes}", play_sounds)
-
- if events is None:
- events = {"exit_early": False}
-
- if episode_time_s is None:
- episode_time_s = float("inf")
-
- timestamp = 0
- start_episode_t = time.perf_counter()
-
- seed = np.random.randint(0, 1e5)
- observation, info = env.reset(seed=seed)
-
- while timestamp < episode_time_s:
- start_loop_t = time.perf_counter()
-
- if policy is not None:
- action = predict_action(observation, policy, device, use_amp)
- else:
- leader_pos = robot.leader_arms.main.read("Present_Position")
- action = process_action_from_leader(leader_pos)
-
- observation, reward, terminated, _, info = env.step(action)
-
- success = info.get("is_success", False)
- env_timestamp = info.get("timestamp", dataset.episode_buffer["size"] / fps)
-
- frame = {
- "action": torch.from_numpy(action),
- "next.reward": reward,
- "next.success": success,
- "seed": seed,
- "timestamp": env_timestamp,
- }
-
- for key in image_keys:
- if not key.startswith("observation.image"):
- frame["observation.image." + key] = observation[key]
- else:
- frame[key] = observation[key]
-
- for key, obs_key in state_keys_dict.items():
- frame[key] = torch.from_numpy(observation[obs_key])
-
- dataset.add_frame(frame)
-
- if display_cameras and not is_headless():
- for key in image_keys:
- cv2.imshow(key, cv2.cvtColor(observation[key], cv2.COLOR_RGB2BGR))
- cv2.waitKey(1)
-
- if fps is not None:
- dt_s = time.perf_counter() - start_loop_t
- busy_wait(1 / fps - dt_s)
-
- dt_s = time.perf_counter() - start_loop_t
- log_control_info(robot, dt_s, fps=fps)
-
- timestamp = time.perf_counter() - start_episode_t
- if events["exit_early"] or terminated:
- events["exit_early"] = False
- break
-
- if events["rerecord_episode"]:
- log_say("Re-record episode", play_sounds)
- events["rerecord_episode"] = False
- events["exit_early"] = False
- dataset.clear_episode_buffer()
- continue
-
- dataset.save_episode(task=task)
- recorded_episodes += 1
-
- if events["stop_recording"] or recorded_episodes >= num_episodes:
- break
- else:
- logging.info("Waiting for a few seconds before starting next episode recording...")
- busy_wait(3)
-
- log_say("Stop recording", play_sounds, blocking=True)
- stop_recording(robot, listener, display_cameras)
-
- if run_compute_stats:
- logging.info("Computing dataset statistics")
- dataset.consolidate(run_compute_stats)
-
- if push_to_hub:
- dataset.push_to_hub(tags=tags)
-
- log_say("Exiting", play_sounds)
- return dataset
-
-
-def replay(
- env, root: Path, repo_id: str, episode: int, fps: int | None = None, local_files_only: bool = True
-):
- env = env()
-
- local_dir = Path(root) / repo_id
- if not local_dir.exists():
- raise ValueError(local_dir)
-
- dataset = LeRobotDataset(repo_id, root=root, local_files_only=local_files_only)
- items = dataset.hf_dataset.select_columns("action")
- seeds = dataset.hf_dataset.select_columns("seed")["seed"]
-
- from_idx = dataset.episode_data_index["from"][episode].item()
- to_idx = dataset.episode_data_index["to"][episode].item()
- env.reset(seed=seeds[from_idx].item())
- logging.info("Replaying episode")
- log_say("Replaying episode", play_sounds=True)
- for idx in range(from_idx, to_idx):
- start_episode_t = time.perf_counter()
- action = items[idx]["action"]
- env.step(action.unsqueeze(0).numpy())
- dt_s = time.perf_counter() - start_episode_t
- busy_wait(1 / fps - dt_s)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- subparsers = parser.add_subparsers(dest="mode", required=True)
-
- # Set common options for all the subparsers
- base_parser = argparse.ArgumentParser(add_help=False)
- base_parser.add_argument(
- "--robot-path",
- type=str,
- default="lerobot/configs/robot/koch.yaml",
- help="Path to robot yaml file used to instantiate the robot using `make_robot` factory function.",
- )
-
- base_parser.add_argument(
- "--sim-config",
- help="Path to a yaml config you want to use for initializing a sim environment based on gym ",
- )
-
- parser_record = subparsers.add_parser("teleoperate", parents=[base_parser])
-
- parser_record = subparsers.add_parser("record", parents=[base_parser])
- parser_record.add_argument(
- "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)"
- )
- parser_record.add_argument(
- "--root",
- type=Path,
- default=None,
- help="Root directory where the dataset will be stored locally at '{root}/{repo_id}' (e.g. 'data/hf_username/dataset_name').",
- )
- parser_record.add_argument(
- "--repo-id",
- type=str,
- default="lerobot/test",
- help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).",
- )
- parser_record.add_argument(
- "--episode-time-s",
- type=int,
- default=60,
- help="Number of seconds for data recording for each episode.",
- )
- parser_record.add_argument(
- "--task",
- type=str,
- required=True,
- help="A description of the task preformed during recording that can be used as a language instruction.",
- )
- parser_record.add_argument("--num-episodes", type=int, default=50, help="Number of episodes to record.")
- parser_record.add_argument(
- "--run-compute-stats",
- type=int,
- default=1,
- help="By default, run the computation of the data statistics at the end of data collection. Compute intensive and not required to just replay an episode.",
- )
- parser_record.add_argument(
- "--push-to-hub",
- type=int,
- default=1,
- help="Upload dataset to Hugging Face hub.",
- )
- parser_record.add_argument(
- "--tags",
- type=str,
- nargs="*",
- help="Add tags to your dataset on the hub.",
- )
- parser_record.add_argument(
- "--num-image-writer-processes",
- type=int,
- default=0,
- help=(
- "Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only; "
- "set to ≥1 to use subprocesses, each using threads to write images. The best number of processes "
- "and threads depends on your system. We recommend 4 threads per camera with 0 processes. "
- "If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses."
- ),
- )
- parser_record.add_argument(
- "--num-image-writer-threads-per-camera",
- type=int,
- default=4,
- help=(
- "Number of threads writing the frames as png images on disk, per camera. "
- "Too much threads might cause unstable teleoperation fps due to main thread being blocked. "
- "Not enough threads might cause low camera fps."
- ),
- )
- parser_record.add_argument(
- "--display-cameras",
- type=int,
- default=0,
- help="Visualize image observations with opencv.",
- )
- parser_record.add_argument(
- "--resume",
- type=int,
- default=0,
- help="Resume recording on an existing dataset.",
- )
- parser_replay = subparsers.add_parser("replay", parents=[base_parser])
- parser_replay.add_argument(
- "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)"
- )
- parser_replay.add_argument(
- "--root",
- type=Path,
- default=None,
- help="Root directory where the dataset will be stored locally (e.g. 'data/hf_username/dataset_name'). By default, stored in cache folder.",
- )
- parser_replay.add_argument(
- "--repo-id",
- type=str,
- default="lerobot/test",
- help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).",
- )
- parser_replay.add_argument("--episode", type=int, default=0, help="Index of the episodes to replay.")
-
- args = parser.parse_args()
-
- init_logging()
-
- control_mode = args.mode
- robot_path = args.robot_path
- env_config_path = args.sim_config
- kwargs = vars(args)
- del kwargs["mode"]
- del kwargs["robot_path"]
- del kwargs["sim_config"]
-
- # make gym env
- env_cfg = init_hydra_config(env_config_path)
- importlib.import_module(f"gym_{env_cfg.env.name}")
-
- def env_constructor():
- return gym.make(env_cfg.env.handle, disable_env_checker=True, **env_cfg.env.gym)
-
- robot = None
- process_leader_actions_fn = None
-
- if control_mode in ["teleoperate", "record"]:
- # make robot
- robot_overrides = ["~cameras", "~follower_arms"]
- robot_cfg = init_hydra_config(robot_path, robot_overrides)
- robot = make_robot(robot_cfg)
- robot.connect()
-
- calib_kwgs = init_sim_calibration(robot, env_cfg.calibration)
-
- def process_leader_actions_fn(action):
- return real_positions_to_sim(action, **calib_kwgs)
-
- robot.leader_arms.main.calibration = None
-
- if control_mode == "teleoperate":
- teleoperate(env_constructor, robot, process_leader_actions_fn)
-
- elif control_mode == "record":
- record(env_constructor, robot, process_leader_actions_fn, **kwargs)
-
- elif control_mode == "replay":
- replay(env_constructor, **kwargs)
-
- else:
- raise ValueError(
- f"Invalid control mode: '{control_mode}', only valid modes are teleoperate, record and replay."
- )
-
- if robot and robot.is_connected:
- # Disconnect manually to avoid a "Core dump" during process
- # termination due to camera threads not properly exiting.
- robot.disconnect()
diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py
deleted file mode 100644
index 040f92d964..0000000000
--- a/lerobot/scripts/eval.py
+++ /dev/null
@@ -1,584 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Evaluate a policy on an environment by running rollouts and computing metrics.
-
-Usage examples:
-
-You want to evaluate a model from the hub (eg: https://huggingface.co/lerobot/diffusion_pusht)
-for 10 episodes.
-
-```
-python lerobot/scripts/eval.py -p lerobot/diffusion_pusht eval.n_episodes=10
-```
-
-OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes.
-
-```
-python lerobot/scripts/eval.py \
- -p outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \
- eval.n_episodes=10
-```
-
-Note that in both examples, the repo/folder should contain at least `config.json`, `config.yaml` and
-`model.safetensors`.
-
-Note the formatting for providing the number of episodes. Generally, you may provide any number of arguments
-with `qualified.parameter.name=value`. In this case, the parameter eval.n_episodes appears as `n_episodes`
-nested under `eval` in the `config.yaml` found at
-https://huggingface.co/lerobot/diffusion_pusht/tree/main.
-"""
-
-import argparse
-import json
-import logging
-import threading
-import time
-from contextlib import nullcontext
-from copy import deepcopy
-from datetime import datetime as dt
-from pathlib import Path
-from typing import Callable
-
-import einops
-import gymnasium as gym
-import numpy as np
-import torch
-from huggingface_hub import snapshot_download
-from huggingface_hub.errors import RepositoryNotFoundError
-from huggingface_hub.utils._validators import HFValidationError
-from torch import Tensor, nn
-from tqdm import trange
-
-from lerobot.common.datasets.factory import make_dataset
-from lerobot.common.envs.factory import make_env
-from lerobot.common.envs.utils import preprocess_observation
-from lerobot.common.logger import log_output_dir
-from lerobot.common.policies.factory import make_policy
-from lerobot.common.policies.policy_protocol import Policy
-from lerobot.common.policies.utils import get_device_from_parameters
-from lerobot.common.utils.io_utils import write_video
-from lerobot.common.utils.utils import (
- get_safe_torch_device,
- init_hydra_config,
- init_logging,
- inside_slurm,
- set_global_seed,
-)
-
-
-def rollout(
- env: gym.vector.VectorEnv,
- policy: Policy,
- seeds: list[int] | None = None,
- return_observations: bool = False,
- render_callback: Callable[[gym.vector.VectorEnv], None] | None = None,
-) -> dict:
- """Run a batched policy rollout once through a batch of environments.
-
- Note that all environments in the batch are run until the last environment is done. This means some
- data will probably need to be discarded (for environments that aren't the first one to be done).
-
- The return dictionary contains:
- (optional) "observation": A a dictionary of (batch, sequence + 1, *) tensors mapped to observation
- keys. NOTE the that this has an extra sequence element relative to the other keys in the
- dictionary. This is because an extra observation is included for after the environment is
- terminated or truncated.
- "action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not
- including the last observations).
- "reward": A (batch, sequence) tensor of rewards received for applying the actions.
- "success": A (batch, sequence) tensor of success conditions (the only time this can be True is upon
- environment termination/truncation).
- "done": A (batch, sequence) tensor of **cumulative** done conditions. For any given batch element,
- the first True is followed by Trues all the way to the end. This can be used for masking
- extraneous elements from the sequences above.
-
- Args:
- env: The batch of environments.
- policy: The policy. Must be a PyTorch nn module.
- seeds: The environments are seeded once at the start of the rollout. If provided, this argument
- specifies the seeds for each of the environments.
- return_observations: Whether to include all observations in the returned rollout data. Observations
- are returned optionally because they typically take more memory to cache. Defaults to False.
- render_callback: Optional rendering callback to be used after the environments are reset, and after
- every step.
- Returns:
- The dictionary described above.
- """
- assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module."
- device = get_device_from_parameters(policy)
-
- # Reset the policy and environments.
- policy.reset()
-
- observation, info = env.reset(seed=seeds)
- if render_callback is not None:
- render_callback(env)
-
- all_observations = []
- all_actions = []
- all_rewards = []
- all_successes = []
- all_dones = []
-
- step = 0
- # Keep track of which environments are done.
- done = np.array([False] * env.num_envs)
- max_steps = env.call("_max_episode_steps")[0]
- progbar = trange(
- max_steps,
- desc=f"Running rollout with at most {max_steps} steps",
- disable=inside_slurm(),  # we don't want a progress bar when we use slurm, since it clutters the logs
- leave=False,
- )
- while not np.all(done):
- # Numpy array to tensor and changing dictionary keys to LeRobot policy format.
- observation = preprocess_observation(observation)
- if return_observations:
- all_observations.append(deepcopy(observation))
-
- observation = {key: observation[key].to(device, non_blocking=True) for key in observation}
-
- with torch.inference_mode():
- action = policy.select_action(observation)
-
- # Convert to CPU / numpy.
- action = action.to("cpu").numpy()
- assert action.ndim == 2, "Action dimensions should be (batch, action_dim)"
-
- # Apply the next action.
- observation, reward, terminated, truncated, info = env.step(action)
- if render_callback is not None:
- render_callback(env)
-
- # VectorEnv stores is_success in `info["final_info"][env_index]["is_success"]`. "final_info" isn't
- # available if none of the envs finished.
- if "final_info" in info:
- successes = [info["is_success"] if info is not None else False for info in info["final_info"]]
- else:
- successes = [False] * env.num_envs
-
- # Keep track of which environments are done so far.
- done = terminated | truncated | done
-
- all_actions.append(torch.from_numpy(action))
- all_rewards.append(torch.from_numpy(reward))
- all_dones.append(torch.from_numpy(done))
- all_successes.append(torch.tensor(successes))
-
- step += 1
- running_success_rate = (
- einops.reduce(torch.stack(all_successes, dim=1), "b n -> b", "any").numpy().mean()
- )
- progbar.set_postfix({"running_success_rate": f"{running_success_rate.item() * 100:.1f}%"})
- progbar.update()
-
- # Track the final observation.
- if return_observations:
- observation = preprocess_observation(observation)
- all_observations.append(deepcopy(observation))
-
- # Stack the sequence along the first dimension so that we have (batch, sequence, *) tensors.
- ret = {
- "action": torch.stack(all_actions, dim=1),
- "reward": torch.stack(all_rewards, dim=1),
- "success": torch.stack(all_successes, dim=1),
- "done": torch.stack(all_dones, dim=1),
- }
- if return_observations:
- stacked_observations = {}
- for key in all_observations[0]:
- stacked_observations[key] = torch.stack([obs[key] for obs in all_observations], dim=1)
- ret["observation"] = stacked_observations
-
- return ret
-
-
-def eval_policy(
- env: gym.vector.VectorEnv,
- policy: torch.nn.Module,
- n_episodes: int,
- max_episodes_rendered: int = 0,
- videos_dir: Path | None = None,
- return_episode_data: bool = False,
- start_seed: int | None = None,
-) -> dict:
- """
- Args:
- env: The batch of environments.
- policy: The policy.
- n_episodes: The number of episodes to evaluate.
- max_episodes_rendered: Maximum number of episodes to render into videos.
- videos_dir: Where to save rendered videos.
- return_episode_data: Whether to return episode data for online training. Incorporates the data into
- the "episodes" key of the returned dictionary.
- start_seed: The first seed to use for the first individual rollout. For all subsequent rollouts the
- seed is incremented by 1. If not provided, the environments are not manually seeded.
- Returns:
- Dictionary with metrics and data regarding the rollouts.
- """
- if max_episodes_rendered > 0 and not videos_dir:
- raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.")
-
- assert isinstance(policy, Policy)
- start = time.time()
- policy.eval()
-
- # Determine how many batched rollouts we need to get n_episodes. Note that if n_episodes is not evenly
- # divisible by env.num_envs we end up discarding some data in the last batch.
- n_batches = n_episodes // env.num_envs + int((n_episodes % env.num_envs) != 0)
-
- # Keep track of some metrics.
- sum_rewards = []
- max_rewards = []
- all_successes = []
- all_seeds = []
- threads = [] # for video saving threads
- n_episodes_rendered = 0 # for saving the correct number of videos
-
- # Callback for visualization.
- def render_frame(env: gym.vector.VectorEnv):
- # noqa: B023
- if n_episodes_rendered >= max_episodes_rendered:
- return
- n_to_render_now = min(max_episodes_rendered - n_episodes_rendered, env.num_envs)
- if isinstance(env, gym.vector.SyncVectorEnv):
- ep_frames.append(np.stack([env.envs[i].render() for i in range(n_to_render_now)])) # noqa: B023
- elif isinstance(env, gym.vector.AsyncVectorEnv):
- # Here we must render all frames and discard any we don't need.
- ep_frames.append(np.stack(env.call("render")[:n_to_render_now]))
-
- if max_episodes_rendered > 0:
- video_paths: list[str] = []
-
- if return_episode_data:
- episode_data: dict | None = None
-
- # we don't want a progress bar when we use slurm, since it clutters the logs
- progbar = trange(n_batches, desc="Stepping through eval batches", disable=inside_slurm())
- for batch_ix in progbar:
- # Cache frames for rendering videos. Each item will be (b, h, w, c), and the list indexes the rollout
- # step.
- if max_episodes_rendered > 0:
- ep_frames: list[np.ndarray] = []
-
- if start_seed is None:
- seeds = None
- else:
- seeds = range(
- start_seed + (batch_ix * env.num_envs), start_seed + ((batch_ix + 1) * env.num_envs)
- )
- rollout_data = rollout(
- env,
- policy,
- seeds=list(seeds) if seeds else None,
- return_observations=return_episode_data,
- render_callback=render_frame if max_episodes_rendered > 0 else None,
- )
-
- # Figure out where in each rollout sequence the first done condition was encountered (results after
- # this won't be included).
- n_steps = rollout_data["done"].shape[1]
- # Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker.
- done_indices = torch.argmax(rollout_data["done"].to(int), dim=1)
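- # For example, a cumulative done row [0, 0, 1, 1] yields done index 2, the first step at which done is True.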
-
- # Make a mask with shape (batch, n_steps) to mask out rollout data after the first done
- # (batch-element-wise). Note the `done_indices + 1` to make sure to keep the data from the done step.
- mask = (torch.arange(n_steps) <= einops.repeat(done_indices + 1, "b -> b s", s=n_steps)).int()
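- # For example, with n_steps == 6 and done_indices[b] == 3, the mask row is [1, 1, 1, 1, 1, 0] (steps 0-4 are kept).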
- # Extend metrics.
- batch_sum_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "sum")
- sum_rewards.extend(batch_sum_rewards.tolist())
- batch_max_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "max")
- max_rewards.extend(batch_max_rewards.tolist())
- batch_successes = einops.reduce((rollout_data["success"] * mask), "b n -> b", "any")
- all_successes.extend(batch_successes.tolist())
- if seeds:
- all_seeds.extend(seeds)
- else:
- all_seeds.append(None)
-
- # FIXME: episode_data is either None or it doesn't exist
- if return_episode_data:
- this_episode_data = _compile_episode_data(
- rollout_data,
- done_indices,
- start_episode_index=batch_ix * env.num_envs,
- start_data_index=(0 if episode_data is None else (episode_data["index"][-1].item() + 1)),
- fps=env.unwrapped.metadata["render_fps"],
- )
- if episode_data is None:
- episode_data = this_episode_data
- else:
- # Some sanity checks to make sure we are correctly compiling the data.
- assert episode_data["episode_index"][-1] + 1 == this_episode_data["episode_index"][0]
- assert episode_data["index"][-1] + 1 == this_episode_data["index"][0]
- # Concatenate the episode data.
- episode_data = {k: torch.cat([episode_data[k], this_episode_data[k]]) for k in episode_data}
-
- # Maybe render video for visualization.
- if max_episodes_rendered > 0 and len(ep_frames) > 0:
- batch_stacked_frames = np.stack(ep_frames, axis=1) # (b, t, *)
- for stacked_frames, done_index in zip(
- batch_stacked_frames, done_indices.flatten().tolist(), strict=False
- ):
- if n_episodes_rendered >= max_episodes_rendered:
- break
-
- videos_dir.mkdir(parents=True, exist_ok=True)
- video_path = videos_dir / f"eval_episode_{n_episodes_rendered}.mp4"
- video_paths.append(str(video_path))
- thread = threading.Thread(
- target=write_video,
- args=(
- str(video_path),
- stacked_frames[: done_index + 1], # + 1 to capture the last observation
- env.unwrapped.metadata["render_fps"],
- ),
- )
- thread.start()
- threads.append(thread)
- n_episodes_rendered += 1
-
- progbar.set_postfix(
- {"running_success_rate": f"{np.mean(all_successes[:n_episodes]).item() * 100:.1f}%"}
- )
-
- # Wait till all video rendering threads are done.
- for thread in threads:
- thread.join()
-
- # Compile eval info.
- info = {
- "per_episode": [
- {
- "episode_ix": i,
- "sum_reward": sum_reward,
- "max_reward": max_reward,
- "success": success,
- "seed": seed,
- }
- for i, (sum_reward, max_reward, success, seed) in enumerate(
- zip(
- sum_rewards[:n_episodes],
- max_rewards[:n_episodes],
- all_successes[:n_episodes],
- all_seeds[:n_episodes],
- strict=True,
- )
- )
- ],
- "aggregated": {
- "avg_sum_reward": float(np.nanmean(sum_rewards[:n_episodes])),
- "avg_max_reward": float(np.nanmean(max_rewards[:n_episodes])),
- "pc_success": float(np.nanmean(all_successes[:n_episodes]) * 100),
- "eval_s": time.time() - start,
- "eval_ep_s": (time.time() - start) / n_episodes,
- },
- }
-
- if return_episode_data:
- info["episodes"] = episode_data
-
- if max_episodes_rendered > 0:
- info["video_paths"] = video_paths
-
- return info
-
-
-def _compile_episode_data(
- rollout_data: dict, done_indices: Tensor, start_episode_index: int, start_data_index: int, fps: float
-) -> dict:
- """Convenience function for `eval_policy(return_episode_data=True)`
-
- Compiles all the rollout data into a Hugging Face dataset.
-
- Similar logic is implemented when datasets are pushed to hub (see: `push_to_hub`).
- """
- ep_dicts = []
- total_frames = 0
- for ep_ix in range(rollout_data["action"].shape[0]):
- # + 2 to include the first done frame and the last observation frame.
- num_frames = done_indices[ep_ix].item() + 2
- total_frames += num_frames
-
- # Here we do `num_frames - 1` as we don't want to include the last observation frame just yet.
- ep_dict = {
- "action": rollout_data["action"][ep_ix, : num_frames - 1],
- "episode_index": torch.tensor([start_episode_index + ep_ix] * (num_frames - 1)),
- "frame_index": torch.arange(0, num_frames - 1, 1),
- "timestamp": torch.arange(0, num_frames - 1, 1) / fps,
- "next.done": rollout_data["done"][ep_ix, : num_frames - 1],
- "next.success": rollout_data["success"][ep_ix, : num_frames - 1],
- "next.reward": rollout_data["reward"][ep_ix, : num_frames - 1].type(torch.float32),
- }
-
- # For the last observation frame, all other keys will just be copy padded.
- for k in ep_dict:
- ep_dict[k] = torch.cat([ep_dict[k], ep_dict[k][-1:]])
-
- for key in rollout_data["observation"]:
- ep_dict[key] = rollout_data["observation"][key][ep_ix, :num_frames]
-
- ep_dicts.append(ep_dict)
-
- data_dict = {}
- for key in ep_dicts[0]:
- data_dict[key] = torch.cat([x[key] for x in ep_dicts])
-
- data_dict["index"] = torch.arange(start_data_index, start_data_index + total_frames, 1)
-
- return data_dict
-
-
-def main(
- pretrained_policy_path: Path | None = None,
- hydra_cfg_path: str | None = None,
- out_dir: str | None = None,
- config_overrides: list[str] | None = None,
-):
- assert (pretrained_policy_path is None) ^ (hydra_cfg_path is None)
- if pretrained_policy_path is not None:
- hydra_cfg = init_hydra_config(str(pretrained_policy_path / "config.yaml"), config_overrides)
- else:
- hydra_cfg = init_hydra_config(hydra_cfg_path, config_overrides)
-
- if hydra_cfg.eval.batch_size > hydra_cfg.eval.n_episodes:
- raise ValueError(
- "The eval batch size is greater than the number of eval episodes "
- f"({hydra_cfg.eval.batch_size} > {hydra_cfg.eval.n_episodes}). As a result, {hydra_cfg.eval.batch_size} "
- f"eval environments will be instantiated, but only {hydra_cfg.eval.n_episodes} will be used. "
- "This might significantly slow down evaluation. To fix this, you should update your command "
- f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={hydra_cfg.eval.batch_size}`), "
- f"or lower the batch size (e.g. `eval.batch_size={hydra_cfg.eval.n_episodes}`)."
- )
-
- if out_dir is None:
- out_dir = f"outputs/eval/{dt.now().strftime('%Y-%m-%d/%H-%M-%S')}_{hydra_cfg.env.name}_{hydra_cfg.policy.name}"
-
- # Check device is available
- device = get_safe_torch_device(hydra_cfg.device, log=True)
-
- torch.backends.cudnn.benchmark = True
- torch.backends.cuda.matmul.allow_tf32 = True
- set_global_seed(hydra_cfg.seed)
-
- log_output_dir(out_dir)
-
- logging.info("Making environment.")
- env = make_env(hydra_cfg)
-
- logging.info("Making policy.")
- if hydra_cfg_path is None:
- policy = make_policy(hydra_cfg=hydra_cfg, pretrained_policy_name_or_path=str(pretrained_policy_path))
- else:
- # Note: We need the dataset stats to pass to the policy's normalization modules.
- policy = make_policy(hydra_cfg=hydra_cfg, dataset_stats=make_dataset(hydra_cfg).meta.stats)
-
- assert isinstance(policy, nn.Module)
- policy.eval()
-
- with torch.no_grad(), torch.autocast(device_type=device.type) if hydra_cfg.use_amp else nullcontext():
- info = eval_policy(
- env,
- policy,
- hydra_cfg.eval.n_episodes,
- max_episodes_rendered=10,
- videos_dir=Path(out_dir) / "videos",
- start_seed=hydra_cfg.seed,
- )
- print(info["aggregated"])
-
- # Save info
- with open(Path(out_dir) / "eval_info.json", "w") as f:
- json.dump(info, f, indent=2)
-
- env.close()
-
- logging.info("End of eval")
-
-
-def get_pretrained_policy_path(pretrained_policy_name_or_path, revision=None):
- try:
- pretrained_policy_path = Path(snapshot_download(pretrained_policy_name_or_path, revision=revision))
- except (HFValidationError, RepositoryNotFoundError) as e:
- if isinstance(e, HFValidationError):
- error_message = (
- "The provided pretrained_policy_name_or_path is not a valid Hugging Face Hub repo ID."
- )
- else:
- error_message = (
- "The provided pretrained_policy_name_or_path was not found on the Hugging Face Hub."
- )
-
- logging.warning(f"{error_message} Treating it as a local directory.")
- pretrained_policy_path = Path(pretrained_policy_name_or_path)
- if not pretrained_policy_path.is_dir() or not pretrained_policy_path.exists():
- raise ValueError(
- "The provided pretrained_policy_name_or_path is not a valid/existing Hugging Face Hub "
- "repo ID, nor is it an existing local directory."
- )
- return pretrained_policy_path
-
-
-if __name__ == "__main__":
- init_logging()
-
- parser = argparse.ArgumentParser(
- description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
- )
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument(
- "-p",
- "--pretrained-policy-name-or-path",
- help=(
- "Either the repo ID of a model hosted on the Hub or a path to a directory containing weights "
- "saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch "
- "(useful for debugging). This argument is mutually exclusive with `--config`."
- ),
- )
- group.add_argument(
- "--config",
- help=(
- "Path to a yaml config you want to use for initializing a policy from scratch (useful for "
- "debugging). This argument is mutually exclusive with `--pretrained-policy-name-or-path` (`-p`)."
- ),
- )
- parser.add_argument("--revision", help="Optionally provide the Hugging Face Hub revision ID.")
- parser.add_argument(
- "--out-dir",
- help=(
- "Where to save the evaluation outputs. If not provided, outputs are saved in "
- "outputs/eval/{timestamp}_{env_name}_{policy_name}"
- ),
- )
- parser.add_argument(
- "overrides",
- nargs="*",
- help="Any key=value arguments to override config values (use dots for.nested=overrides)",
- )
- args = parser.parse_args()
-
- if args.pretrained_policy_name_or_path is None:
- main(hydra_cfg_path=args.config, out_dir=args.out_dir, config_overrides=args.overrides)
- else:
- pretrained_policy_path = get_pretrained_policy_path(
- args.pretrained_policy_name_or_path, revision=args.revision
- )
-
- main(
- pretrained_policy_path=pretrained_policy_path,
- out_dir=args.out_dir,
- config_overrides=args.overrides,
- )
diff --git a/lerobot/scripts/find_motors_bus_port.py b/lerobot/scripts/find_motors_bus_port.py
deleted file mode 100644
index 67b92ad7d6..0000000000
--- a/lerobot/scripts/find_motors_bus_port.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import time
-from pathlib import Path
-
-from serial.tools import list_ports # Part of pyserial library
-
-
-def find_available_ports():
- if os.name == "nt": # Windows
- # List COM ports using pyserial
- ports = [port.device for port in list_ports.comports()]
- else: # Linux/macOS
- # List /dev/tty* ports for Unix-based systems
- ports = [str(path) for path in Path("/dev").glob("tty*")]
- return ports
-
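-# Illustrative note (not part of the original script): on Linux/macOS this typically returns entries
-# such as ['/dev/ttyACM0', '/dev/ttyUSB0']; on Windows, COM ports such as ['COM3', 'COM4'].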
-
-def find_port():
- print("Finding all available ports for the MotorsBus.")
- ports_before = find_available_ports()
- print("Ports before disconnecting:", ports_before)
-
- print("Remove the USB cable from your MotorsBus and press Enter when done.")
- input() # Wait for user to disconnect the device
-
- time.sleep(0.5) # Allow some time for port to be released
- ports_after = find_available_ports()
- ports_diff = list(set(ports_before) - set(ports_after))
-
- if len(ports_diff) == 1:
- port = ports_diff[0]
- print(f"The port of this MotorsBus is '{port}'")
- print("Reconnect the USB cable.")
- elif len(ports_diff) == 0:
- raise OSError(f"Could not detect the port. No difference was found ({ports_diff}).")
- else:
- raise OSError(f"Could not detect the port. More than one port was found ({ports_diff}).")
-
-
-if __name__ == "__main__":
- # Helper to find the USB port associated with your MotorsBus.
- find_port()
diff --git a/lerobot/scripts/push_dataset_to_hub.py b/lerobot/scripts/push_dataset_to_hub.py
deleted file mode 100644
index 0233ede696..0000000000
--- a/lerobot/scripts/push_dataset_to_hub.py
+++ /dev/null
@@ -1,364 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Use this script to convert your dataset into LeRobot dataset format and upload it to the Hugging Face hub,
-or store it locally. LeRobot dataset format is lightweight, fast to load from, and does not require any
-installation of neural-net-specific packages like pytorch, tensorflow, or jax.
-
-Example of how to download raw datasets, convert them into LeRobotDataset format, and push them to the hub:
-```
-python lerobot/scripts/push_dataset_to_hub.py \
---raw-dir data/pusht_raw \
---raw-format pusht_zarr \
---repo-id lerobot/pusht
-
-python lerobot/scripts/push_dataset_to_hub.py \
---raw-dir data/xarm_lift_medium_raw \
---raw-format xarm_pkl \
---repo-id lerobot/xarm_lift_medium
-
-python lerobot/scripts/push_dataset_to_hub.py \
---raw-dir data/aloha_sim_insertion_scripted_raw \
---raw-format aloha_hdf5 \
---repo-id lerobot/aloha_sim_insertion_scripted
-
-python lerobot/scripts/push_dataset_to_hub.py \
---raw-dir data/umi_cup_in_the_wild_raw \
---raw-format umi_zarr \
---repo-id lerobot/umi_cup_in_the_wild
-```
-"""
-
-import argparse
-import json
-import shutil
-import warnings
-from pathlib import Path
-from typing import Any
-
-import torch
-from huggingface_hub import HfApi
-from safetensors.torch import save_file
-
-from lerobot.common.datasets.compute_stats import compute_stats
-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
-from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id
-from lerobot.common.datasets.utils import create_branch, create_lerobot_dataset_card, flatten_dict
-
-
-def get_from_raw_to_lerobot_format_fn(raw_format: str):
- if raw_format == "pusht_zarr":
- from lerobot.common.datasets.push_dataset_to_hub.pusht_zarr_format import from_raw_to_lerobot_format
- elif raw_format == "umi_zarr":
- from lerobot.common.datasets.push_dataset_to_hub.umi_zarr_format import from_raw_to_lerobot_format
- elif raw_format == "aloha_hdf5":
- from lerobot.common.datasets.push_dataset_to_hub.aloha_hdf5_format import from_raw_to_lerobot_format
- elif raw_format in ["rlds", "openx"]:
- from lerobot.common.datasets.push_dataset_to_hub.openx_rlds_format import from_raw_to_lerobot_format
- elif raw_format == "dora_parquet":
- from lerobot.common.datasets.push_dataset_to_hub.dora_parquet_format import from_raw_to_lerobot_format
- elif raw_format == "xarm_pkl":
- from lerobot.common.datasets.push_dataset_to_hub.xarm_pkl_format import from_raw_to_lerobot_format
- elif raw_format == "cam_png":
- from lerobot.common.datasets.push_dataset_to_hub.cam_png_format import from_raw_to_lerobot_format
- else:
- raise ValueError(
- f"The selected {raw_format} can't be found. Did you add it to `lerobot/scripts/push_dataset_to_hub.py::get_from_raw_to_lerobot_format_fn`?"
- )
-
- return from_raw_to_lerobot_format
-
-
-def save_meta_data(
- info: dict[str, Any], stats: dict, episode_data_index: dict[str, list], meta_data_dir: Path
-):
- meta_data_dir.mkdir(parents=True, exist_ok=True)
-
- # save info
- info_path = meta_data_dir / "info.json"
- with open(str(info_path), "w") as f:
- json.dump(info, f, indent=4)
-
- # save stats
- stats_path = meta_data_dir / "stats.safetensors"
- save_file(flatten_dict(stats), stats_path)
-
- # save episode_data_index
- episode_data_index = {key: torch.tensor(episode_data_index[key]) for key in episode_data_index}
- ep_data_idx_path = meta_data_dir / "episode_data_index.safetensors"
- save_file(episode_data_index, ep_data_idx_path)
-
-
-def push_meta_data_to_hub(repo_id: str, meta_data_dir: str | Path, revision: str | None):
- """Expect all meta data files to be all stored in a single "meta_data" directory.
- On the hugging face repositery, they will be uploaded in a "meta_data" directory at the root.
- """
- api = HfApi()
- api.upload_folder(
- folder_path=meta_data_dir,
- path_in_repo="meta_data",
- repo_id=repo_id,
- revision=revision,
- repo_type="dataset",
- )
-
-
-def push_dataset_card_to_hub(
- repo_id: str,
- revision: str | None,
- tags: list | None = None,
- license: str = "apache-2.0",
- **card_kwargs,
-):
- """Creates and pushes a LeRobotDataset Card with appropriate tags to easily find it on the hub."""
- card = create_lerobot_dataset_card(tags=tags, license=license, **card_kwargs)
- card.push_to_hub(repo_id=repo_id, repo_type="dataset", revision=revision)
-
-
-def push_videos_to_hub(repo_id: str, videos_dir: str | Path, revision: str | None):
- """Expect mp4 files to be all stored in a single "videos" directory.
- On the hugging face repositery, they will be uploaded in a "videos" directory at the root.
- """
- api = HfApi()
- api.upload_folder(
- folder_path=videos_dir,
- path_in_repo="videos",
- repo_id=repo_id,
- revision=revision,
- repo_type="dataset",
- allow_patterns="*.mp4",
- )
-
-
-def push_dataset_to_hub(
- raw_dir: Path,
- raw_format: str,
- repo_id: str,
- push_to_hub: bool = True,
- local_dir: Path | None = None,
- fps: int | None = None,
- video: bool = True,
- batch_size: int = 32,
- num_workers: int = 8,
- episodes: list[int] | None = None,
- force_override: bool = False,
- resume: bool = False,
- cache_dir: Path = Path("/tmp"),
- tests_data_dir: Path | None = None,
- encoding: dict | None = None,
-):
- check_repo_id(repo_id)
- user_id, dataset_id = repo_id.split("/")
-
- # Robustify when `raw_dir` is str instead of Path
- raw_dir = Path(raw_dir)
- if not raw_dir.exists():
- raise NotADirectoryError(
- f"{raw_dir} does not exists. Check your paths or run this command to download an existing raw dataset on the hub: "
- f"`python lerobot/common/datasets/push_dataset_to_hub/_download_raw.py --raw-dir your/raw/dir --repo-id your/repo/id_raw`"
- )
-
- if local_dir:
- # Robustify when `local_dir` is str instead of Path
- local_dir = Path(local_dir)
-
- # Warn if local_dir isn't well formatted
- if local_dir.parts[-2] != user_id or local_dir.parts[-1] != dataset_id:
- warnings.warn(
- f"`local_dir` ({local_dir}) doesn't contain a community or user id `/` the name of the dataset that match the `repo_id` (e.g. 'data/lerobot/pusht'). Following this naming convention is advised, but not mandatory.",
- stacklevel=1,
- )
-
- # Check we don't override an existing `local_dir` by mistake
- if local_dir.exists():
- if force_override:
- shutil.rmtree(local_dir)
- elif not resume:
- raise ValueError(f"`local_dir` already exists ({local_dir}). Use `--force-override 1`.")
-
- meta_data_dir = local_dir / "meta_data"
- videos_dir = local_dir / "videos"
- else:
- # Temporary directory used to store images, videos, meta_data
- meta_data_dir = Path(cache_dir) / "meta_data"
- videos_dir = Path(cache_dir) / "videos"
-
- if raw_format is None:
- # TODO(rcadene, adilzouitine): implement auto_find_raw_format
- raise NotImplementedError()
- # raw_format = auto_find_raw_format(raw_dir)
-
- # convert dataset from original raw format to LeRobot format
- from_raw_to_lerobot_format = get_from_raw_to_lerobot_format_fn(raw_format)
-
- hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(
- raw_dir,
- videos_dir,
- fps,
- video,
- episodes,
- encoding,
- )
-
- lerobot_dataset = LeRobotDataset.from_preloaded(
- repo_id=repo_id,
- hf_dataset=hf_dataset,
- episode_data_index=episode_data_index,
- info=info,
- videos_dir=videos_dir,
- )
- stats = compute_stats(lerobot_dataset, batch_size, num_workers)
-
- if local_dir:
- hf_dataset = hf_dataset.with_format(None) # to remove transforms that cant be saved
- hf_dataset.save_to_disk(str(local_dir / "train"))
-
- if push_to_hub or local_dir:
- # mandatory for upload
- save_meta_data(info, stats, episode_data_index, meta_data_dir)
-
- if push_to_hub:
- hf_dataset.push_to_hub(repo_id, revision="main")
- push_meta_data_to_hub(repo_id, meta_data_dir, revision="main")
- push_dataset_card_to_hub(repo_id, revision="main")
- if video:
- push_videos_to_hub(repo_id, videos_dir, revision="main")
- create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION)
-
- if tests_data_dir:
- # get the first episode
- num_items_first_ep = episode_data_index["to"][0] - episode_data_index["from"][0]
- test_hf_dataset = hf_dataset.select(range(num_items_first_ep))
- episode_data_index = {k: v[:1] for k, v in episode_data_index.items()}
-
- test_hf_dataset = test_hf_dataset.with_format(None)
- test_hf_dataset.save_to_disk(str(tests_data_dir / repo_id / "train"))
-
- tests_meta_data = tests_data_dir / repo_id / "meta_data"
- save_meta_data(info, stats, episode_data_index, tests_meta_data)
-
- # copy videos of first episode to tests directory
- episode_index = 0
- tests_videos_dir = tests_data_dir / repo_id / "videos"
- tests_videos_dir.mkdir(parents=True, exist_ok=True)
- for key in lerobot_dataset.camera_keys:
- fname = f"{key}_episode_{episode_index:06d}.mp4"
- shutil.copy(videos_dir / fname, tests_videos_dir / fname)
-
- if local_dir is None:
- # clear cache
- shutil.rmtree(meta_data_dir)
- shutil.rmtree(videos_dir)
-
- return lerobot_dataset
-
-
-def main():
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- "--raw-dir",
- type=Path,
- required=True,
- help="Directory containing input raw datasets (e.g. `data/aloha_mobile_chair_raw` or `data/pusht_raw).",
- )
- # TODO(rcadene): add automatic detection of the format
- parser.add_argument(
- "--raw-format",
- type=str,
- required=True,
- help="Dataset type (e.g. `pusht_zarr`, `umi_zarr`, `aloha_hdf5`, `xarm_pkl`, `dora_parquet`, `rlds`, `openx`).",
- )
- parser.add_argument(
- "--repo-id",
- type=str,
- required=True,
- help="Repositery identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
- )
- parser.add_argument(
- "--local-dir",
- type=Path,
- help="When provided, writes the dataset converted to LeRobotDataset format in this directory (e.g. `data/lerobot/aloha_mobile_chair`).",
- )
- parser.add_argument(
- "--push-to-hub",
- type=int,
- default=1,
- help="Upload to hub.",
- )
- parser.add_argument(
- "--fps",
- type=int,
- help="Frame rate used to collect videos. If not provided, use the default one specified in the code.",
- )
- parser.add_argument(
- "--video",
- type=int,
- default=1,
- help="Convert each episode of the raw dataset to an mp4 video. This option allows 60 times lower disk space consumption and 25 faster loading time during training.",
- )
- parser.add_argument(
- "--batch-size",
- type=int,
- default=32,
- help="Batch size loaded by DataLoader for computing the dataset statistics.",
- )
- parser.add_argument(
- "--num-workers",
- type=int,
- default=8,
- help="Number of processes of Dataloader for computing the dataset statistics.",
- )
- parser.add_argument(
- "--episodes",
- type=int,
- nargs="*",
- help="When provided, only converts the provided episodes (e.g `--episodes 2 3 4`). Useful to test the code on 1 episode.",
- )
- parser.add_argument(
- "--force-override",
- type=int,
- default=0,
- help="When set to 1, removes provided output directory if it already exists. By default, raises a ValueError exception.",
- )
- parser.add_argument(
- "--resume",
- type=int,
- default=0,
- help="When set to 1, resumes a previous run.",
- )
- parser.add_argument(
- "--cache-dir",
- type=Path,
- required=False,
- default="/tmp",
- help="Directory to store the temporary videos and images generated while creating the dataset.",
- )
- parser.add_argument(
- "--tests-data-dir",
- type=Path,
- help=(
- "When provided, save tests artifacts into the given directory "
- "(e.g. `--tests-data-dir tests/data` will save to tests/data/{--repo-id})."
- ),
- )
-
- args = parser.parse_args()
- push_dataset_to_hub(**vars(args))
-
-
-if __name__ == "__main__":
- main()
diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py
deleted file mode 100644
index 9a0b7e4cbb..0000000000
--- a/lerobot/scripts/train.py
+++ /dev/null
@@ -1,669 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-import time
-from concurrent.futures import ThreadPoolExecutor
-from contextlib import nullcontext
-from copy import deepcopy
-from pathlib import Path
-from pprint import pformat
-from threading import Lock
-
-import hydra
-import numpy as np
-import torch
-from deepdiff import DeepDiff
-from omegaconf import DictConfig, ListConfig, OmegaConf
-from termcolor import colored
-from torch import nn
-from torch.cuda.amp import GradScaler
-
-from lerobot.common.datasets.factory import make_dataset, resolve_delta_timestamps
-from lerobot.common.datasets.lerobot_dataset import MultiLeRobotDataset
-from lerobot.common.datasets.online_buffer import OnlineBuffer, compute_sampler_weights
-from lerobot.common.datasets.sampler import EpisodeAwareSampler
-from lerobot.common.datasets.utils import cycle
-from lerobot.common.envs.factory import make_env
-from lerobot.common.logger import Logger, log_output_dir
-from lerobot.common.policies.factory import make_policy
-from lerobot.common.policies.policy_protocol import PolicyWithUpdate
-from lerobot.common.policies.utils import get_device_from_parameters
-from lerobot.common.utils.utils import (
- format_big_number,
- get_safe_torch_device,
- init_hydra_config,
- init_logging,
- set_global_seed,
-)
-from lerobot.scripts.eval import eval_policy
-
-
-def make_optimizer_and_scheduler(cfg, policy):
- if cfg.policy.name == "act":
- optimizer_params_dicts = [
- {
- "params": [
- p
- for n, p in policy.named_parameters()
- if not n.startswith("model.backbone") and p.requires_grad
- ]
- },
- {
- "params": [
- p
- for n, p in policy.named_parameters()
- if n.startswith("model.backbone") and p.requires_grad
- ],
- "lr": cfg.training.lr_backbone,
- },
- ]
- optimizer = torch.optim.AdamW(
- optimizer_params_dicts, lr=cfg.training.lr, weight_decay=cfg.training.weight_decay
- )
- lr_scheduler = None
- elif cfg.policy.name == "diffusion":
- optimizer = torch.optim.Adam(
- policy.diffusion.parameters(),
- cfg.training.lr,
- cfg.training.adam_betas,
- cfg.training.adam_eps,
- cfg.training.adam_weight_decay,
- )
- from diffusers.optimization import get_scheduler
-
- lr_scheduler = get_scheduler(
- cfg.training.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=cfg.training.lr_warmup_steps,
- num_training_steps=cfg.training.offline_steps,
- )
- elif policy.name == "tdmpc":
- optimizer = torch.optim.Adam(policy.parameters(), cfg.training.lr)
- lr_scheduler = None
- elif cfg.policy.name == "vqbet":
- from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTOptimizer, VQBeTScheduler
-
- optimizer = VQBeTOptimizer(policy, cfg)
- lr_scheduler = VQBeTScheduler(optimizer, cfg)
- else:
- raise NotImplementedError()
-
- return optimizer, lr_scheduler
-
-
-def update_policy(
- policy,
- batch,
- optimizer,
- grad_clip_norm,
- grad_scaler: GradScaler,
- lr_scheduler=None,
- use_amp: bool = False,
- lock=None,
-):
- """Returns a dictionary of items for logging."""
- start_time = time.perf_counter()
- device = get_device_from_parameters(policy)
- policy.train()
- with torch.autocast(device_type=device.type) if use_amp else nullcontext():
- output_dict = policy.forward(batch)
- # TODO(rcadene): policy.unnormalize_outputs(out_dict)
- loss = output_dict["loss"]
- grad_scaler.scale(loss).backward()
-
- # Unscale the gradients of the optimizer's assigned params in-place **prior to gradient clipping**.
- grad_scaler.unscale_(optimizer)
-
- grad_norm = torch.nn.utils.clip_grad_norm_(
- policy.parameters(),
- grad_clip_norm,
- error_if_nonfinite=False,
- )
-
- # Optimizer's gradients are already unscaled, so scaler.step does not unscale them,
- # although it still skips optimizer.step() if the gradients contain infs or NaNs.
- with lock if lock is not None else nullcontext():
- grad_scaler.step(optimizer)
- # Updates the scale for next iteration.
- grad_scaler.update()
-
- optimizer.zero_grad()
-
- if lr_scheduler is not None:
- lr_scheduler.step()
-
- if isinstance(policy, PolicyWithUpdate):
- # To possibly update an internal buffer (for instance an Exponential Moving Average like in TDMPC).
- policy.update()
-
- info = {
- "loss": loss.item(),
- "grad_norm": float(grad_norm),
- "lr": optimizer.param_groups[0]["lr"],
- "update_s": time.perf_counter() - start_time,
- **{k: v for k, v in output_dict.items() if k != "loss"},
- }
- info.update({k: v for k, v in output_dict.items() if k not in info})
-
- return info
-
-
-def log_train_info(logger: Logger, info, step, cfg, dataset, is_online):
- loss = info["loss"]
- grad_norm = info["grad_norm"]
- lr = info["lr"]
- update_s = info["update_s"]
- dataloading_s = info["dataloading_s"]
-
- # A sample is an (observation, action) pair, where the observation and action
- # can span multiple timestamps. In a batch, we have `batch_size` samples.
- num_samples = (step + 1) * cfg.training.batch_size
- avg_samples_per_ep = dataset.num_frames / dataset.num_episodes
- num_episodes = num_samples / avg_samples_per_ep
- num_epochs = num_samples / dataset.num_frames
- log_items = [
- f"step:{format_big_number(step)}",
- # number of samples seen during training
- f"smpl:{format_big_number(num_samples)}",
- # number of episodes seen during training
- f"ep:{format_big_number(num_episodes)}",
- # number of times all unique samples are seen
- f"epch:{num_epochs:.2f}",
- f"loss:{loss:.3f}",
- f"grdn:{grad_norm:.3f}",
- f"lr:{lr:0.1e}",
- # in seconds
- f"updt_s:{update_s:.3f}",
- f"data_s:{dataloading_s:.3f}", # if not ~0, you are bottlenecked by cpu or io
- ]
- logging.info(" ".join(log_items))
-
- info["step"] = step
- info["num_samples"] = num_samples
- info["num_episodes"] = num_episodes
- info["num_epochs"] = num_epochs
- info["is_online"] = is_online
-
- logger.log_dict(info, step, mode="train")
-
-
-def log_eval_info(logger, info, step, cfg, dataset, is_online):
- eval_s = info["eval_s"]
- avg_sum_reward = info["avg_sum_reward"]
- pc_success = info["pc_success"]
-
- # A sample is an (observation, action) pair, where the observation and action
- # can span multiple timestamps. In a batch, we have `batch_size` samples.
- num_samples = (step + 1) * cfg.training.batch_size
- avg_samples_per_ep = dataset.num_frames / dataset.num_episodes
- num_episodes = num_samples / avg_samples_per_ep
- num_epochs = num_samples / dataset.num_frames
- log_items = [
- f"step:{format_big_number(step)}",
- # number of samples seen during training
- f"smpl:{format_big_number(num_samples)}",
- # number of episodes seen during training
- f"ep:{format_big_number(num_episodes)}",
- # number of times all unique samples are seen
- f"epch:{num_epochs:.2f}",
- f"∑rwrd:{avg_sum_reward:.3f}",
- f"success:{pc_success:.1f}%",
- f"eval_s:{eval_s:.3f}",
- ]
- logging.info(" ".join(log_items))
-
- info["step"] = step
- info["num_samples"] = num_samples
- info["num_episodes"] = num_episodes
- info["num_epochs"] = num_epochs
- info["is_online"] = is_online
-
- logger.log_dict(info, step, mode="eval")
-
-
-def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = None):
- if out_dir is None:
- raise NotImplementedError()
- if job_name is None:
- raise NotImplementedError()
-
- init_logging()
- logging.info(pformat(OmegaConf.to_container(cfg)))
-
- if cfg.training.online_steps > 0 and isinstance(cfg.dataset_repo_id, ListConfig):
- raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.")
-
- # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need
- # to check for any differences between the provided config and the checkpoint's config.
- if cfg.resume:
- if not Logger.get_last_checkpoint_dir(out_dir).exists():
- raise RuntimeError(
- "You have set resume=True, but there is no model checkpoint in "
- f"{Logger.get_last_checkpoint_dir(out_dir)}"
- )
- checkpoint_cfg_path = str(Logger.get_last_pretrained_model_dir(out_dir) / "config.yaml")
- logging.info(
- colored(
- "You have set resume=True, indicating that you wish to resume a run",
- color="yellow",
- attrs=["bold"],
- )
- )
- # Get the configuration file from the last checkpoint.
- checkpoint_cfg = init_hydra_config(checkpoint_cfg_path)
- # Check for differences between the checkpoint configuration and provided configuration.
- # Hack to resolve the delta_timestamps ahead of time in order to properly diff.
- resolve_delta_timestamps(cfg)
- diff = DeepDiff(OmegaConf.to_container(checkpoint_cfg), OmegaConf.to_container(cfg))
- # Ignore the `resume` parameter.
- if "values_changed" in diff and "root['resume']" in diff["values_changed"]:
- del diff["values_changed"]["root['resume']"]
- # Log a warning about differences between the checkpoint configuration and the provided
- # configuration.
- if len(diff) > 0:
- logging.warning(
- "At least one difference was detected between the checkpoint configuration and "
- f"the provided configuration: \n{pformat(diff)}\nNote that the checkpoint configuration "
- "takes precedence.",
- )
- # Use the checkpoint config instead of the provided config (but keep `resume` parameter).
- cfg = checkpoint_cfg
- cfg.resume = True
- elif Logger.get_last_checkpoint_dir(out_dir).exists():
- raise RuntimeError(
- f"The configured output directory {Logger.get_last_checkpoint_dir(out_dir)} already exists. If "
- "you meant to resume training, please use `resume=true` in your command or yaml configuration."
- )
-
- if cfg.eval.batch_size > cfg.eval.n_episodes:
- raise ValueError(
- "The eval batch size is greater than the number of eval episodes "
- f"({cfg.eval.batch_size} > {cfg.eval.n_episodes}). As a result, {cfg.eval.batch_size} "
- f"eval environments will be instantiated, but only {cfg.eval.n_episodes} will be used. "
- "This might significantly slow down evaluation. To fix this, you should update your command "
- f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={cfg.eval.batch_size}`), "
- f"or lower the batch size (e.g. `eval.batch_size={cfg.eval.n_episodes}`)."
- )
-
- # log metrics to terminal and wandb
- logger = Logger(cfg, out_dir, wandb_job_name=job_name)
-
- set_global_seed(cfg.seed)
-
- # Check device is available
- device = get_safe_torch_device(cfg.device, log=True)
-
- torch.backends.cudnn.benchmark = True
- torch.backends.cuda.matmul.allow_tf32 = True
-
- logging.info("make_dataset")
- offline_dataset = make_dataset(cfg)
- if isinstance(offline_dataset, MultiLeRobotDataset):
- logging.info(
- "Multiple datasets were provided. Applied the following index mapping to the provided datasets: "
- f"{pformat(offline_dataset.repo_id_to_index , indent=2)}"
- )
-
- # Create environment used for evaluating checkpoints during training on simulation data.
- # For real-world data, there is no need to create an environment, as evaluations are done outside
- # train.py, using eval.py with the gym_dora environment and dora-rs.
- eval_env = None
- if cfg.training.eval_freq > 0:
- logging.info("make_env")
- eval_env = make_env(cfg)
-
- logging.info("make_policy")
- policy = make_policy(
- hydra_cfg=cfg,
- dataset_stats=offline_dataset.meta.stats if not cfg.resume else None,
- pretrained_policy_name_or_path=str(logger.last_pretrained_model_dir) if cfg.resume else None,
- )
- assert isinstance(policy, nn.Module)
- # Create optimizer and scheduler
- # Temporary hack to move optimizer out of policy
- optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)
- grad_scaler = GradScaler(enabled=cfg.use_amp)
-
- step = 0 # number of policy updates (forward + backward + optim)
-
- if cfg.resume:
- step = logger.load_last_training_state(optimizer, lr_scheduler)
-
- num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad)
- num_total_params = sum(p.numel() for p in policy.parameters())
-
- log_output_dir(out_dir)
- logging.info(f"{cfg.env.task=}")
- logging.info(f"{cfg.training.offline_steps=} ({format_big_number(cfg.training.offline_steps)})")
- logging.info(f"{cfg.training.online_steps=}")
- logging.info(f"{offline_dataset.num_frames=} ({format_big_number(offline_dataset.num_frames)})")
- logging.info(f"{offline_dataset.num_episodes=}")
- logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})")
- logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})")
-
- # Note: this helper will be used in offline and online training loops.
- def evaluate_and_checkpoint_if_needed(step, is_online):
- _num_digits = max(6, len(str(cfg.training.offline_steps + cfg.training.online_steps)))
- step_identifier = f"{step:0{_num_digits}d}"
-
- if cfg.training.eval_freq > 0 and step % cfg.training.eval_freq == 0:
- logging.info(f"Eval policy at step {step}")
- with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.use_amp else nullcontext():
- assert eval_env is not None
- eval_info = eval_policy(
- eval_env,
- policy,
- cfg.eval.n_episodes,
- videos_dir=Path(out_dir) / "eval" / f"videos_step_{step_identifier}",
- max_episodes_rendered=4,
- start_seed=cfg.seed,
- )
- log_eval_info(logger, eval_info["aggregated"], step, cfg, offline_dataset, is_online=is_online)
- if cfg.wandb.enable:
- logger.log_video(eval_info["video_paths"][0], step, mode="eval")
- logging.info("Resume training")
-
- if cfg.training.save_checkpoint and (
- step % cfg.training.save_freq == 0
- or step == cfg.training.offline_steps + cfg.training.online_steps
- ):
- logging.info(f"Checkpoint policy after step {step}")
- # Note: Save with step as the identifier, and format it to have at least 6 digits but more if
- # needed (choose 6 as a minimum for consistency without being overkill).
- logger.save_checkpoint(
- step,
- policy,
- optimizer,
- lr_scheduler,
- identifier=step_identifier,
- )
- logging.info("Resume training")
-
- # create dataloader for offline training
- if cfg.training.get("drop_n_last_frames"):
- shuffle = False
- sampler = EpisodeAwareSampler(
- offline_dataset.episode_data_index,
- drop_n_last_frames=cfg.training.drop_n_last_frames,
- shuffle=True,
- )
- else:
- shuffle = True
- sampler = None
- dataloader = torch.utils.data.DataLoader(
- offline_dataset,
- num_workers=cfg.training.num_workers,
- batch_size=cfg.training.batch_size,
- shuffle=shuffle,
- sampler=sampler,
- pin_memory=device.type != "cpu",
- drop_last=False,
- )
- dl_iter = cycle(dataloader)
-
- policy.train()
- offline_step = 0
- for _ in range(step, cfg.training.offline_steps):
- if offline_step == 0:
- logging.info("Start offline training on a fixed dataset")
-
- start_time = time.perf_counter()
- batch = next(dl_iter)
- dataloading_s = time.perf_counter() - start_time
-
- for key in batch:
- batch[key] = batch[key].to(device, non_blocking=True)
-
- train_info = update_policy(
- policy,
- batch,
- optimizer,
- cfg.training.grad_clip_norm,
- grad_scaler=grad_scaler,
- lr_scheduler=lr_scheduler,
- use_amp=cfg.use_amp,
- )
-
- train_info["dataloading_s"] = dataloading_s
-
- if step % cfg.training.log_freq == 0:
- log_train_info(logger, train_info, step, cfg, offline_dataset, is_online=False)
-
- # Note: evaluate_and_checkpoint_if_needed happens **after** the `step`th training update has completed,
- # so we pass in step + 1.
- evaluate_and_checkpoint_if_needed(step + 1, is_online=False)
-
- step += 1
- offline_step += 1 # noqa: SIM113
-
- if cfg.training.online_steps == 0:
- if eval_env:
- eval_env.close()
- logging.info("End of training")
- return
-
- # Online training.
-
- # Create an env dedicated to online episodes collection from policy rollout.
- online_env = make_env(cfg, n_envs=cfg.training.online_rollout_batch_size)
- resolve_delta_timestamps(cfg)
- online_buffer_path = logger.log_dir / "online_buffer"
- if cfg.resume and not online_buffer_path.exists():
- # If we are resuming a run, we default to the data shapes and buffer capacity from the saved online
- # buffer.
- logging.warning(
- "When online training is resumed, we load the latest online buffer from the prior run, "
- "and this might not coincide with the state of the buffer as it was at the moment the checkpoint "
- "was made. This is because the online buffer is updated on disk during training, independently "
- "of our explicit checkpointing mechanisms."
- )
- online_dataset = OnlineBuffer(
- online_buffer_path,
- data_spec={
- **{k: {"shape": v, "dtype": np.dtype("float32")} for k, v in policy.config.input_shapes.items()},
- **{k: {"shape": v, "dtype": np.dtype("float32")} for k, v in policy.config.output_shapes.items()},
- "next.reward": {"shape": (), "dtype": np.dtype("float32")},
- "next.done": {"shape": (), "dtype": np.dtype("?")},
- "next.success": {"shape": (), "dtype": np.dtype("?")},
- },
- buffer_capacity=cfg.training.online_buffer_capacity,
- fps=online_env.unwrapped.metadata["render_fps"],
- delta_timestamps=cfg.training.delta_timestamps,
- )
-
- # If we are doing online rollouts asynchronously, deepcopy the policy to use for online rollouts (this
- # makes it possible to do online rollouts in parallel with training updates).
- online_rollout_policy = deepcopy(policy) if cfg.training.do_online_rollout_async else policy
-
- # Create dataloader for online training.
- concat_dataset = torch.utils.data.ConcatDataset([offline_dataset, online_dataset])
- sampler_weights = compute_sampler_weights(
- offline_dataset,
- offline_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0),
- online_dataset=online_dataset,
- # +1 because online rollouts return an extra frame for the "final observation". Note: we don't have
- # this final observation in the offline datasets, but we might add them in future.
- online_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0) + 1,
- online_sampling_ratio=cfg.training.online_sampling_ratio,
- )
- sampler = torch.utils.data.WeightedRandomSampler(
- sampler_weights,
- num_samples=len(concat_dataset),
- replacement=True,
- )
- dataloader = torch.utils.data.DataLoader(
- concat_dataset,
- batch_size=cfg.training.batch_size,
- num_workers=cfg.training.num_workers,
- sampler=sampler,
- pin_memory=device.type != "cpu",
- drop_last=True,
- )
- dl_iter = cycle(dataloader)
-
- # Lock and thread pool executor for asynchronous online rollouts. When asynchronous mode is disabled,
- # these are still used but effectively do nothing.
- lock = Lock()
- # Note: 1 worker because we only ever want to run one set of online rollouts at a time. Batch
- # parallelization of rollouts is handled within the job.
- executor = ThreadPoolExecutor(max_workers=1)
-
- online_step = 0
- online_rollout_s = 0 # time taken to do an online rollout
- update_online_buffer_s = 0 # time taken to update the online buffer with the online rollout data
- # Time taken waiting for the online buffer to finish being updated. This is relevant when using the async
- # online rollout option.
- await_update_online_buffer_s = 0
- rollout_start_seed = cfg.training.online_env_seed
-
- while True:
- if online_step == cfg.training.online_steps:
- break
-
- if online_step == 0:
- logging.info("Start online training by interacting with environment")
-
- def sample_trajectory_and_update_buffer():
- nonlocal rollout_start_seed
- with lock:
- online_rollout_policy.load_state_dict(policy.state_dict())
- online_rollout_policy.eval()
- start_rollout_time = time.perf_counter()
- with torch.no_grad():
- eval_info = eval_policy(
- online_env,
- online_rollout_policy,
- n_episodes=cfg.training.online_rollout_n_episodes,
- max_episodes_rendered=min(10, cfg.training.online_rollout_n_episodes),
- videos_dir=logger.log_dir / "online_rollout_videos",
- return_episode_data=True,
- start_seed=(
- rollout_start_seed := (rollout_start_seed + cfg.training.batch_size) % 1000000
- ),
- )
- online_rollout_s = time.perf_counter() - start_rollout_time
-
- with lock:
- start_update_buffer_time = time.perf_counter()
- online_dataset.add_data(eval_info["episodes"])
-
- # Update the concatenated dataset length used during sampling.
- concat_dataset.cumulative_sizes = concat_dataset.cumsum(concat_dataset.datasets)
-
- # Update the sampling weights.
- sampler.weights = compute_sampler_weights(
- offline_dataset,
- offline_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0),
- online_dataset=online_dataset,
- # +1 because online rollouts return an extra frame for the "final observation". Note: we don't have
- # this final observation in the offline datasets, but we might add them in future.
- online_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0) + 1,
- online_sampling_ratio=cfg.training.online_sampling_ratio,
- )
- sampler.num_frames = len(concat_dataset)
-
- update_online_buffer_s = time.perf_counter() - start_update_buffer_time
-
- return online_rollout_s, update_online_buffer_s
-
- future = executor.submit(sample_trajectory_and_update_buffer)
- # If we aren't doing async rollouts, or if we haven't yet gotten enough examples in our buffer, wait
- # here until the rollout and buffer update is done, before proceeding to the policy update steps.
- if (
- not cfg.training.do_online_rollout_async
- or len(online_dataset) <= cfg.training.online_buffer_seed_size
- ):
- online_rollout_s, update_online_buffer_s = future.result()
-
- if len(online_dataset) <= cfg.training.online_buffer_seed_size:
- logging.info(
- f"Seeding online buffer: {len(online_dataset)}/{cfg.training.online_buffer_seed_size}"
- )
- continue
-
- policy.train()
- for _ in range(cfg.training.online_steps_between_rollouts):
- with lock:
- start_time = time.perf_counter()
- batch = next(dl_iter)
- dataloading_s = time.perf_counter() - start_time
-
- for key in batch:
- batch[key] = batch[key].to(cfg.device, non_blocking=True)
-
- train_info = update_policy(
- policy,
- batch,
- optimizer,
- cfg.training.grad_clip_norm,
- grad_scaler=grad_scaler,
- lr_scheduler=lr_scheduler,
- use_amp=cfg.use_amp,
- lock=lock,
- )
-
- train_info["dataloading_s"] = dataloading_s
- train_info["online_rollout_s"] = online_rollout_s
- train_info["update_online_buffer_s"] = update_online_buffer_s
- train_info["await_update_online_buffer_s"] = await_update_online_buffer_s
- with lock:
- train_info["online_buffer_size"] = len(online_dataset)
-
- if step % cfg.training.log_freq == 0:
- log_train_info(logger, train_info, step, cfg, online_dataset, is_online=True)
-
- # Note: evaluate_and_checkpoint_if_needed happens **after** the `step`th training update has completed,
- # so we pass in step + 1.
- evaluate_and_checkpoint_if_needed(step + 1, is_online=True)
-
- step += 1
- online_step += 1
-
- # If we're doing async rollouts, we should now wait until we've completed them before proceeding
- # to do the next batch of rollouts.
- if future.running():
- start = time.perf_counter()
- online_rollout_s, update_online_buffer_s = future.result()
- await_update_online_buffer_s = time.perf_counter() - start
-
- if online_step >= cfg.training.online_steps:
- break
-
- if eval_env:
- eval_env.close()
- logging.info("End of training")
-
-
-@hydra.main(version_base="1.2", config_name="default", config_path="../configs")
-def train_cli(cfg: dict):
- train(
- cfg,
- out_dir=hydra.core.hydra_config.HydraConfig.get().run.dir,
- job_name=hydra.core.hydra_config.HydraConfig.get().job.name,
- )
-
-
-def train_notebook(out_dir=None, job_name=None, config_name="default", config_path="../configs"):
- from hydra import compose, initialize
-
- hydra.core.global_hydra.GlobalHydra.instance().clear()
- initialize(config_path=config_path)
- cfg = compose(config_name=config_name)
- train(cfg, out_dir=out_dir, job_name=job_name)
-
-
-if __name__ == "__main__":
- train_cli()
diff --git a/lerobot/scripts/visualize_image_transforms.py b/lerobot/scripts/visualize_image_transforms.py
deleted file mode 100644
index f9fb5c08aa..0000000000
--- a/lerobot/scripts/visualize_image_transforms.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Visualize effects of image transforms for a given configuration.
-
-This script generates examples of transformed images as they are output by a LeRobot dataset.
-Additionally, each individual transform can be visualized separately, as well as examples of combined transforms.
-
-
---- Usage Examples ---
-
-Increase hue jitter
-```
-python lerobot/scripts/visualize_image_transforms.py \
- dataset_repo_id=lerobot/aloha_mobile_shrimp \
- training.image_transforms.hue.min_max="[-0.25,0.25]"
-```
-
-Increase brightness & brightness weight
-```
-python lerobot/scripts/visualize_image_transforms.py \
- dataset_repo_id=lerobot/aloha_mobile_shrimp \
- training.image_transforms.brightness.weight=10.0 \
- training.image_transforms.brightness.min_max="[1.0,2.0]"
-```
-
-Blur images and disable saturation & hue
-```
-python lerobot/scripts/visualize_image_transforms.py \
- dataset_repo_id=lerobot/aloha_mobile_shrimp \
- training.image_transforms.sharpness.weight=10.0 \
- training.image_transforms.sharpness.min_max="[0.0,1.0]" \
- training.image_transforms.saturation.weight=0.0 \
- training.image_transforms.hue.weight=0.0
-```
-
-Use all transforms with random order
-```
-python lerobot/scripts/visualize_image_transforms.py \
- dataset_repo_id=lerobot/aloha_mobile_shrimp \
- training.image_transforms.max_num_transforms=5 \
- training.image_transforms.random_order=true
-```
-
-"""
-
-from pathlib import Path
-
-import hydra
-from torchvision.transforms import ToPILImage
-
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.common.datasets.transforms import get_image_transforms
-
-OUTPUT_DIR = Path("outputs/image_transforms")
-to_pil = ToPILImage()
-
-
-def save_config_all_transforms(cfg, original_frame, output_dir, n_examples):
- tf = get_image_transforms(
- brightness_weight=cfg.brightness.weight,
- brightness_min_max=cfg.brightness.min_max,
- contrast_weight=cfg.contrast.weight,
- contrast_min_max=cfg.contrast.min_max,
- saturation_weight=cfg.saturation.weight,
- saturation_min_max=cfg.saturation.min_max,
- hue_weight=cfg.hue.weight,
- hue_min_max=cfg.hue.min_max,
- sharpness_weight=cfg.sharpness.weight,
- sharpness_min_max=cfg.sharpness.min_max,
- max_num_transforms=cfg.max_num_transforms,
- random_order=cfg.random_order,
- )
-
- output_dir_all = output_dir / "all"
- output_dir_all.mkdir(parents=True, exist_ok=True)
-
- for i in range(1, n_examples + 1):
- transformed_frame = tf(original_frame)
- to_pil(transformed_frame).save(output_dir_all / f"{i}.png", quality=100)
-
- print("Combined transforms examples saved to:")
- print(f" {output_dir_all}")
-
-
-def save_config_single_transforms(cfg, original_frame, output_dir, n_examples):
- transforms = [
- "brightness",
- "contrast",
- "saturation",
- "hue",
- "sharpness",
- ]
- print("Individual transforms examples saved to:")
- for transform in transforms:
- # Apply one transformation with a random value in the min_max range
- kwargs = {
- f"{transform}_weight": cfg[f"{transform}"].weight,
- f"{transform}_min_max": cfg[f"{transform}"].min_max,
- }
- tf = get_image_transforms(**kwargs)
- output_dir_single = output_dir / f"{transform}"
- output_dir_single.mkdir(parents=True, exist_ok=True)
-
- for i in range(1, n_examples + 1):
- transformed_frame = tf(original_frame)
- to_pil(transformed_frame).save(output_dir_single / f"{i}.png", quality=100)
-
- # Apply min transformation
- min_value, max_value = cfg[f"{transform}"].min_max
- kwargs = {
- f"{transform}_weight": cfg[f"{transform}"].weight,
- f"{transform}_min_max": (min_value, min_value),
- }
- tf = get_image_transforms(**kwargs)
- transformed_frame = tf(original_frame)
- to_pil(transformed_frame).save(output_dir_single / "min.png", quality=100)
-
- # Apply max transformation
- kwargs = {
- f"{transform}_weight": cfg[f"{transform}"].weight,
- f"{transform}_min_max": (max_value, max_value),
- }
- tf = get_image_transforms(**kwargs)
- transformed_frame = tf(original_frame)
- to_pil(transformed_frame).save(output_dir_single / "max.png", quality=100)
-
- # Apply mean transformation
- mean_value = (min_value + max_value) / 2
- kwargs = {
- f"{transform}_weight": cfg[f"{transform}"].weight,
- f"{transform}_min_max": (mean_value, mean_value),
- }
- tf = get_image_transforms(**kwargs)
- transformed_frame = tf(original_frame)
- to_pil(transformed_frame).save(output_dir_single / "mean.png", quality=100)
-
- print(f" {output_dir_single}")
-
-
-def visualize_transforms(cfg, output_dir: Path, n_examples: int = 5):
- dataset = LeRobotDataset(cfg.dataset_repo_id)
-
- output_dir = output_dir / cfg.dataset_repo_id.split("/")[-1]
- output_dir.mkdir(parents=True, exist_ok=True)
-
- # Get 1st frame from 1st camera of 1st episode
- original_frame = dataset[0][dataset.meta.camera_keys[0]]
- to_pil(original_frame).save(output_dir / "original_frame.png", quality=100)
- print("\nOriginal frame saved to:")
- print(f" {output_dir / 'original_frame.png'}.")
-
- save_config_all_transforms(cfg.training.image_transforms, original_frame, output_dir, n_examples)
- save_config_single_transforms(cfg.training.image_transforms, original_frame, output_dir, n_examples)
-
-
-@hydra.main(version_base="1.2", config_name="default", config_path="../configs")
-def visualize_transforms_cli(cfg):
- visualize_transforms(cfg, output_dir=OUTPUT_DIR)
-
-
-if __name__ == "__main__":
- visualize_transforms_cli()
diff --git a/lerobot/templates/visualize_dataset_template.html b/lerobot/templates/visualize_dataset_template.html
deleted file mode 100644
index 3c93d2d623..0000000000
--- a/lerobot/templates/visualize_dataset_template.html
+++ /dev/null
@@ -1,458 +0,0 @@
-
-
-
-
-
-
-
-
-
-
- {{ dataset_info.repo_id }} episode {{ episode_id }}
-
-
-
-
-
- {
- // Use the space bar to play and pause, instead of the default action (e.g. scrolling)
- const { keyCode, key } = e;
- if (keyCode === 32 || key === ' ') {
- e.preventDefault();
- $refs.btnPause.classList.contains('hidden') ? $refs.btnPlay.click() : $refs.btnPause.click();
- }else if (key === 'ArrowDown' || key === 'ArrowUp'){
- const nextEpisodeId = key === 'ArrowDown' ? {{ episode_id }} + 1 : {{ episode_id }} - 1;
- const lowestEpisodeId = {{ episodes }}.at(0);
- const highestEpisodeId = {{ episodes }}.at(-1);
- if(nextEpisodeId >= lowestEpisodeId && nextEpisodeId <= highestEpisodeId){
- window.location.href = `./episode_${nextEpisodeId}`;
- }
- }
-}">
-
-
-
-
-
-
-
-
-
diff --git a/media/aloha/follower_rest.webp b/media/aloha/follower_rest.webp
deleted file mode 100644
index 03698acd65..0000000000
Binary files a/media/aloha/follower_rest.webp and /dev/null differ
diff --git a/media/aloha/follower_rotated.webp b/media/aloha/follower_rotated.webp
deleted file mode 100644
index 914958bbc3..0000000000
Binary files a/media/aloha/follower_rotated.webp and /dev/null differ
diff --git a/media/aloha/follower_zero.webp b/media/aloha/follower_zero.webp
deleted file mode 100644
index c14c516cc6..0000000000
Binary files a/media/aloha/follower_zero.webp and /dev/null differ
diff --git a/media/aloha/leader_rest.webp b/media/aloha/leader_rest.webp
deleted file mode 100644
index 821fdf7b3b..0000000000
Binary files a/media/aloha/leader_rest.webp and /dev/null differ
diff --git a/media/aloha/leader_rotated.webp b/media/aloha/leader_rotated.webp
deleted file mode 100644
index ed4a3faa7d..0000000000
Binary files a/media/aloha/leader_rotated.webp and /dev/null differ
diff --git a/media/aloha/leader_zero.webp b/media/aloha/leader_zero.webp
deleted file mode 100644
index b67cfa773f..0000000000
Binary files a/media/aloha/leader_zero.webp and /dev/null differ
diff --git a/media/hope_jr/hopejr.png b/media/hope_jr/hopejr.png
new file mode 100644
index 0000000000..4186547a25
Binary files /dev/null and b/media/hope_jr/hopejr.png differ
diff --git a/media/koch/follower_rest.webp b/media/koch/follower_rest.webp
deleted file mode 100644
index 0a14d074c2..0000000000
Binary files a/media/koch/follower_rest.webp and /dev/null differ
diff --git a/media/koch/follower_rotated.webp b/media/koch/follower_rotated.webp
deleted file mode 100644
index 3a91d24908..0000000000
Binary files a/media/koch/follower_rotated.webp and /dev/null differ
diff --git a/media/koch/follower_zero.webp b/media/koch/follower_zero.webp
deleted file mode 100644
index aa107ed3d7..0000000000
Binary files a/media/koch/follower_zero.webp and /dev/null differ
diff --git a/media/koch/leader_rest.webp b/media/koch/leader_rest.webp
deleted file mode 100644
index e0454cfd4a..0000000000
Binary files a/media/koch/leader_rest.webp and /dev/null differ
diff --git a/media/koch/leader_rotated.webp b/media/koch/leader_rotated.webp
deleted file mode 100644
index 183e4206ef..0000000000
Binary files a/media/koch/leader_rotated.webp and /dev/null differ
diff --git a/media/koch/leader_zero.webp b/media/koch/leader_zero.webp
deleted file mode 100644
index f3b885acf9..0000000000
Binary files a/media/koch/leader_zero.webp and /dev/null differ
diff --git a/media/lekiwi/kiwi.webp b/media/lekiwi/kiwi.webp
new file mode 100644
index 0000000000..2dd7d92561
Binary files /dev/null and b/media/lekiwi/kiwi.webp differ
diff --git a/media/moss/follower_initial.webp b/media/moss/follower_initial.webp
deleted file mode 100644
index e7ded16bda..0000000000
Binary files a/media/moss/follower_initial.webp and /dev/null differ
diff --git a/media/moss/follower_rest.webp b/media/moss/follower_rest.webp
deleted file mode 100644
index f0dba18bd7..0000000000
Binary files a/media/moss/follower_rest.webp and /dev/null differ
diff --git a/media/moss/follower_rotated.webp b/media/moss/follower_rotated.webp
deleted file mode 100644
index 23d5aa9c1b..0000000000
Binary files a/media/moss/follower_rotated.webp and /dev/null differ
diff --git a/media/moss/follower_zero.webp b/media/moss/follower_zero.webp
deleted file mode 100644
index 10ef83704f..0000000000
Binary files a/media/moss/follower_zero.webp and /dev/null differ
diff --git a/media/moss/leader_rest.webp b/media/moss/leader_rest.webp
deleted file mode 100644
index cd77d294d1..0000000000
Binary files a/media/moss/leader_rest.webp and /dev/null differ
diff --git a/media/moss/leader_rotated.webp b/media/moss/leader_rotated.webp
deleted file mode 100644
index c3426650a6..0000000000
Binary files a/media/moss/leader_rotated.webp and /dev/null differ
diff --git a/media/moss/leader_zero.webp b/media/moss/leader_zero.webp
deleted file mode 100644
index d79ed37368..0000000000
Binary files a/media/moss/leader_zero.webp and /dev/null differ
diff --git a/media/so100/follower_initial.webp b/media/so100/follower_initial.webp
deleted file mode 100644
index 7f93a773a7..0000000000
Binary files a/media/so100/follower_initial.webp and /dev/null differ
diff --git a/media/so100/follower_rest.webp b/media/so100/follower_rest.webp
deleted file mode 100644
index 971fbc6842..0000000000
Binary files a/media/so100/follower_rest.webp and /dev/null differ
diff --git a/media/so100/follower_rotated.webp b/media/so100/follower_rotated.webp
deleted file mode 100644
index b13d7d7d57..0000000000
Binary files a/media/so100/follower_rotated.webp and /dev/null differ
diff --git a/media/so100/follower_zero.webp b/media/so100/follower_zero.webp
deleted file mode 100644
index 411a555454..0000000000
Binary files a/media/so100/follower_zero.webp and /dev/null differ
diff --git a/media/so100/leader_rest.webp b/media/so100/leader_rest.webp
deleted file mode 100644
index 351667778f..0000000000
Binary files a/media/so100/leader_rest.webp and /dev/null differ
diff --git a/media/so100/leader_rotated.webp b/media/so100/leader_rotated.webp
deleted file mode 100644
index 1f770f6ce7..0000000000
Binary files a/media/so100/leader_rotated.webp and /dev/null differ
diff --git a/media/so100/leader_zero.webp b/media/so100/leader_zero.webp
deleted file mode 100644
index 5f8c235f98..0000000000
Binary files a/media/so100/leader_zero.webp and /dev/null differ
diff --git a/media/so101/so101-leader.webp b/media/so101/so101-leader.webp
new file mode 100644
index 0000000000..22ff3a4bc5
Binary files /dev/null and b/media/so101/so101-leader.webp differ
diff --git a/media/so101/so101.webp b/media/so101/so101.webp
new file mode 100644
index 0000000000..ce65e94bc2
Binary files /dev/null and b/media/so101/so101.webp differ
diff --git a/media/tutorial/koch_v1_1_leader_follower.webp b/media/tutorial/koch_v1_1_leader_follower.webp
deleted file mode 100644
index f576a531a1..0000000000
Binary files a/media/tutorial/koch_v1_1_leader_follower.webp and /dev/null differ
diff --git a/media/tutorial/visualize_dataset_html.webp b/media/tutorial/visualize_dataset_html.webp
deleted file mode 100644
index e71bc56297..0000000000
Binary files a/media/tutorial/visualize_dataset_html.webp and /dev/null differ
diff --git a/poetry.lock b/poetry.lock
deleted file mode 100644
index 576d7e600d..0000000000
--- a/poetry.lock
+++ /dev/null
@@ -1,7578 +0,0 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
-
-[[package]]
-name = "absl-py"
-version = "2.1.0"
-description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "absl-py-2.1.0.tar.gz", hash = "sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff"},
- {file = "absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308"},
-]
-
-[[package]]
-name = "aiohappyeyeballs"
-version = "2.4.3"
-description = "Happy Eyeballs for asyncio"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "aiohappyeyeballs-2.4.3-py3-none-any.whl", hash = "sha256:8a7a83727b2756f394ab2895ea0765a0a8c475e3c71e98d43d76f22b4b435572"},
- {file = "aiohappyeyeballs-2.4.3.tar.gz", hash = "sha256:75cf88a15106a5002a8eb1dab212525c00d1f4c0fa96e551c9fbe6f09a621586"},
-]
-
-[[package]]
-name = "aiohttp"
-version = "3.10.10"
-description = "Async http client/server framework (asyncio)"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "aiohttp-3.10.10-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:be7443669ae9c016b71f402e43208e13ddf00912f47f623ee5994e12fc7d4b3f"},
- {file = "aiohttp-3.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b06b7843929e41a94ea09eb1ce3927865387e3e23ebe108e0d0d09b08d25be9"},
- {file = "aiohttp-3.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:333cf6cf8e65f6a1e06e9eb3e643a0c515bb850d470902274239fea02033e9a8"},
- {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:274cfa632350225ce3fdeb318c23b4a10ec25c0e2c880eff951a3842cf358ac1"},
- {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9e5e4a85bdb56d224f412d9c98ae4cbd032cc4f3161818f692cd81766eee65a"},
- {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b606353da03edcc71130b52388d25f9a30a126e04caef1fd637e31683033abd"},
- {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab5a5a0c7a7991d90446a198689c0535be89bbd6b410a1f9a66688f0880ec026"},
- {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:578a4b875af3e0daaf1ac6fa983d93e0bbfec3ead753b6d6f33d467100cdc67b"},
- {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8105fd8a890df77b76dd3054cddf01a879fc13e8af576805d667e0fa0224c35d"},
- {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3bcd391d083f636c06a68715e69467963d1f9600f85ef556ea82e9ef25f043f7"},
- {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fbc6264158392bad9df19537e872d476f7c57adf718944cc1e4495cbabf38e2a"},
- {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e48d5021a84d341bcaf95c8460b152cfbad770d28e5fe14a768988c461b821bc"},
- {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2609e9ab08474702cc67b7702dbb8a80e392c54613ebe80db7e8dbdb79837c68"},
- {file = "aiohttp-3.10.10-cp310-cp310-win32.whl", hash = "sha256:84afcdea18eda514c25bc68b9af2a2b1adea7c08899175a51fe7c4fb6d551257"},
- {file = "aiohttp-3.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:9c72109213eb9d3874f7ac8c0c5fa90e072d678e117d9061c06e30c85b4cf0e6"},
- {file = "aiohttp-3.10.10-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c30a0eafc89d28e7f959281b58198a9fa5e99405f716c0289b7892ca345fe45f"},
- {file = "aiohttp-3.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:258c5dd01afc10015866114e210fb7365f0d02d9d059c3c3415382ab633fcbcb"},
- {file = "aiohttp-3.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:15ecd889a709b0080f02721255b3f80bb261c2293d3c748151274dfea93ac871"},
- {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3935f82f6f4a3820270842e90456ebad3af15810cf65932bd24da4463bc0a4c"},
- {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:413251f6fcf552a33c981c4709a6bba37b12710982fec8e558ae944bfb2abd38"},
- {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1720b4f14c78a3089562b8875b53e36b51c97c51adc53325a69b79b4b48ebcb"},
- {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:679abe5d3858b33c2cf74faec299fda60ea9de62916e8b67e625d65bf069a3b7"},
- {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79019094f87c9fb44f8d769e41dbb664d6e8fcfd62f665ccce36762deaa0e911"},
- {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe2fb38c2ed905a2582948e2de560675e9dfbee94c6d5ccdb1301c6d0a5bf092"},
- {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a3f00003de6eba42d6e94fabb4125600d6e484846dbf90ea8e48a800430cc142"},
- {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1bbb122c557a16fafc10354b9d99ebf2f2808a660d78202f10ba9d50786384b9"},
- {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:30ca7c3b94708a9d7ae76ff281b2f47d8eaf2579cd05971b5dc681db8caac6e1"},
- {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:df9270660711670e68803107d55c2b5949c2e0f2e4896da176e1ecfc068b974a"},
- {file = "aiohttp-3.10.10-cp311-cp311-win32.whl", hash = "sha256:aafc8ee9b742ce75044ae9a4d3e60e3d918d15a4c2e08a6c3c3e38fa59b92d94"},
- {file = "aiohttp-3.10.10-cp311-cp311-win_amd64.whl", hash = "sha256:362f641f9071e5f3ee6f8e7d37d5ed0d95aae656adf4ef578313ee585b585959"},
- {file = "aiohttp-3.10.10-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9294bbb581f92770e6ed5c19559e1e99255e4ca604a22c5c6397b2f9dd3ee42c"},
- {file = "aiohttp-3.10.10-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a8fa23fe62c436ccf23ff930149c047f060c7126eae3ccea005f0483f27b2e28"},
- {file = "aiohttp-3.10.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c6a5b8c7926ba5d8545c7dd22961a107526562da31a7a32fa2456baf040939f"},
- {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:007ec22fbc573e5eb2fb7dec4198ef8f6bf2fe4ce20020798b2eb5d0abda6138"},
- {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9627cc1a10c8c409b5822a92d57a77f383b554463d1884008e051c32ab1b3742"},
- {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50edbcad60d8f0e3eccc68da67f37268b5144ecc34d59f27a02f9611c1d4eec7"},
- {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a45d85cf20b5e0d0aa5a8dca27cce8eddef3292bc29d72dcad1641f4ed50aa16"},
- {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b00807e2605f16e1e198f33a53ce3c4523114059b0c09c337209ae55e3823a8"},
- {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f2d4324a98062be0525d16f768a03e0bbb3b9fe301ceee99611dc9a7953124e6"},
- {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:438cd072f75bb6612f2aca29f8bd7cdf6e35e8f160bc312e49fbecab77c99e3a"},
- {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:baa42524a82f75303f714108fea528ccacf0386af429b69fff141ffef1c534f9"},
- {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a7d8d14fe962153fc681f6366bdec33d4356f98a3e3567782aac1b6e0e40109a"},
- {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c1277cd707c465cd09572a774559a3cc7c7a28802eb3a2a9472588f062097205"},
- {file = "aiohttp-3.10.10-cp312-cp312-win32.whl", hash = "sha256:59bb3c54aa420521dc4ce3cc2c3fe2ad82adf7b09403fa1f48ae45c0cbde6628"},
- {file = "aiohttp-3.10.10-cp312-cp312-win_amd64.whl", hash = "sha256:0e1b370d8007c4ae31ee6db7f9a2fe801a42b146cec80a86766e7ad5c4a259cf"},
- {file = "aiohttp-3.10.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ad7593bb24b2ab09e65e8a1d385606f0f47c65b5a2ae6c551db67d6653e78c28"},
- {file = "aiohttp-3.10.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1eb89d3d29adaf533588f209768a9c02e44e4baf832b08118749c5fad191781d"},
- {file = "aiohttp-3.10.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3fe407bf93533a6fa82dece0e74dbcaaf5d684e5a51862887f9eaebe6372cd79"},
- {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aed5155f819873d23520919e16703fc8925e509abbb1a1491b0087d1cd969e"},
- {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f05e9727ce409358baa615dbeb9b969db94324a79b5a5cea45d39bdb01d82e6"},
- {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dffb610a30d643983aeb185ce134f97f290f8935f0abccdd32c77bed9388b42"},
- {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa6658732517ddabe22c9036479eabce6036655ba87a0224c612e1ae6af2087e"},
- {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:741a46d58677d8c733175d7e5aa618d277cd9d880301a380fd296975a9cdd7bc"},
- {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e00e3505cd80440f6c98c6d69269dcc2a119f86ad0a9fd70bccc59504bebd68a"},
- {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ffe595f10566f8276b76dc3a11ae4bb7eba1aac8ddd75811736a15b0d5311414"},
- {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdfcf6443637c148c4e1a20c48c566aa694fa5e288d34b20fcdc58507882fed3"},
- {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d183cf9c797a5291e8301790ed6d053480ed94070637bfaad914dd38b0981f67"},
- {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:77abf6665ae54000b98b3c742bc6ea1d1fb31c394bcabf8b5d2c1ac3ebfe7f3b"},
- {file = "aiohttp-3.10.10-cp313-cp313-win32.whl", hash = "sha256:4470c73c12cd9109db8277287d11f9dd98f77fc54155fc71a7738a83ffcc8ea8"},
- {file = "aiohttp-3.10.10-cp313-cp313-win_amd64.whl", hash = "sha256:486f7aabfa292719a2753c016cc3a8f8172965cabb3ea2e7f7436c7f5a22a151"},
- {file = "aiohttp-3.10.10-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1b66ccafef7336a1e1f0e389901f60c1d920102315a56df85e49552308fc0486"},
- {file = "aiohttp-3.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:acd48d5b80ee80f9432a165c0ac8cbf9253eaddb6113269a5e18699b33958dbb"},
- {file = "aiohttp-3.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3455522392fb15ff549d92fbf4b73b559d5e43dc522588f7eb3e54c3f38beee7"},
- {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c3b868724137f713a38376fef8120c166d1eadd50da1855c112fe97954aed8"},
- {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:da1dee8948d2137bb51fbb8a53cce6b1bcc86003c6b42565f008438b806cccd8"},
- {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5ce2ce7c997e1971b7184ee37deb6ea9922ef5163c6ee5aa3c274b05f9e12fa"},
- {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28529e08fde6f12eba8677f5a8608500ed33c086f974de68cc65ab218713a59d"},
- {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7db54c7914cc99d901d93a34704833568d86c20925b2762f9fa779f9cd2e70f"},
- {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03a42ac7895406220124c88911ebee31ba8b2d24c98507f4a8bf826b2937c7f2"},
- {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7e338c0523d024fad378b376a79faff37fafb3c001872a618cde1d322400a572"},
- {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:038f514fe39e235e9fef6717fbf944057bfa24f9b3db9ee551a7ecf584b5b480"},
- {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:64f6c17757251e2b8d885d728b6433d9d970573586a78b78ba8929b0f41d045a"},
- {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:93429602396f3383a797a2a70e5f1de5df8e35535d7806c9f91df06f297e109b"},
- {file = "aiohttp-3.10.10-cp38-cp38-win32.whl", hash = "sha256:c823bc3971c44ab93e611ab1a46b1eafeae474c0c844aff4b7474287b75fe49c"},
- {file = "aiohttp-3.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:54ca74df1be3c7ca1cf7f4c971c79c2daf48d9aa65dea1a662ae18926f5bc8ce"},
- {file = "aiohttp-3.10.10-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01948b1d570f83ee7bbf5a60ea2375a89dfb09fd419170e7f5af029510033d24"},
- {file = "aiohttp-3.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9fc1500fd2a952c5c8e3b29aaf7e3cc6e27e9cfc0a8819b3bce48cc1b849e4cc"},
- {file = "aiohttp-3.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f614ab0c76397661b90b6851a030004dac502e48260ea10f2441abd2207fbcc7"},
- {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00819de9e45d42584bed046314c40ea7e9aea95411b38971082cad449392b08c"},
- {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05646ebe6b94cc93407b3bf34b9eb26c20722384d068eb7339de802154d61bc5"},
- {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:998f3bd3cfc95e9424a6acd7840cbdd39e45bc09ef87533c006f94ac47296090"},
- {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9010c31cd6fa59438da4e58a7f19e4753f7f264300cd152e7f90d4602449762"},
- {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ea7ffc6d6d6f8a11e6f40091a1040995cdff02cfc9ba4c2f30a516cb2633554"},
- {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ef9c33cc5cbca35808f6c74be11eb7f5f6b14d2311be84a15b594bd3e58b5527"},
- {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ce0cdc074d540265bfeb31336e678b4e37316849d13b308607efa527e981f5c2"},
- {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:597a079284b7ee65ee102bc3a6ea226a37d2b96d0418cc9047490f231dc09fe8"},
- {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:7789050d9e5d0c309c706953e5e8876e38662d57d45f936902e176d19f1c58ab"},
- {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e7f8b04d83483577fd9200461b057c9f14ced334dcb053090cea1da9c8321a91"},
- {file = "aiohttp-3.10.10-cp39-cp39-win32.whl", hash = "sha256:c02a30b904282777d872266b87b20ed8cc0d1501855e27f831320f471d54d983"},
- {file = "aiohttp-3.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:edfe3341033a6b53a5c522c802deb2079eee5cbfbb0af032a55064bd65c73a23"},
- {file = "aiohttp-3.10.10.tar.gz", hash = "sha256:0631dd7c9f0822cc61c88586ca76d5b5ada26538097d0f1df510b082bad3411a"},
-]
-
-[package.dependencies]
-aiohappyeyeballs = ">=2.3.0"
-aiosignal = ">=1.1.2"
-async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""}
-attrs = ">=17.3.0"
-frozenlist = ">=1.1.1"
-multidict = ">=4.5,<7.0"
-yarl = ">=1.12.0,<2.0"
-
-[package.extras]
-speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"]
-
-[[package]]
-name = "aioserial"
-version = "1.3.1"
-description = "An asynchronous serial port library of Python"
-optional = true
-python-versions = ">=3.6,<4.0"
-files = [
- {file = "aioserial-1.3.1.tar.gz", hash = "sha256:702bf03b0eb84b8ef2d8dac5cb925e1e685dce98f77b125569bc6fd2b3b58228"},
-]
-
-[package.dependencies]
-pyserial = "*"
-
-[[package]]
-name = "aiosignal"
-version = "1.3.1"
-description = "aiosignal: a list of registered asynchronous callbacks"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
- {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
-]
-
-[package.dependencies]
-frozenlist = ">=1.1.0"
-
-[[package]]
-name = "antlr4-python3-runtime"
-version = "4.9.3"
-description = "ANTLR 4.9.3 runtime for Python 3.7"
-optional = false
-python-versions = "*"
-files = [
- {file = "antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b"},
-]
-
-[[package]]
-name = "anyio"
-version = "4.6.0"
-description = "High level compatibility layer for multiple asynchronous event loop implementations"
-optional = true
-python-versions = ">=3.9"
-files = [
- {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"},
- {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"},
-]
-
-[package.dependencies]
-exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
-idna = ">=2.8"
-sniffio = ">=1.1"
-typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
-
-[package.extras]
-doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
-test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"]
-trio = ["trio (>=0.26.1)"]
-
-[[package]]
-name = "appnope"
-version = "0.1.4"
-description = "Disable App Nap on macOS >= 10.9"
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"},
- {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"},
-]
-
-[[package]]
-name = "argon2-cffi"
-version = "23.1.0"
-description = "Argon2 for Python"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"},
- {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"},
-]
-
-[package.dependencies]
-argon2-cffi-bindings = "*"
-
-[package.extras]
-dev = ["argon2-cffi[tests,typing]", "tox (>4)"]
-docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"]
-tests = ["hypothesis", "pytest"]
-typing = ["mypy"]
-
-[[package]]
-name = "argon2-cffi-bindings"
-version = "21.2.0"
-description = "Low-level CFFI bindings for Argon2"
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"},
- {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"},
- {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"},
- {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"},
- {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"},
- {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"},
- {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"},
- {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"},
- {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"},
- {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"},
- {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"},
- {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"},
- {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"},
-]
-
-[package.dependencies]
-cffi = ">=1.0.1"
-
-[package.extras]
-dev = ["cogapp", "pre-commit", "pytest", "wheel"]
-tests = ["pytest"]
-
-[[package]]
-name = "arrow"
-version = "1.3.0"
-description = "Better dates & times for Python"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"},
- {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"},
-]
-
-[package.dependencies]
-python-dateutil = ">=2.7.0"
-types-python-dateutil = ">=2.8.10"
-
-[package.extras]
-doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"]
-test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"]
-
-[[package]]
-name = "asciitree"
-version = "0.3.3"
-description = "Draws ASCII trees."
-optional = false
-python-versions = "*"
-files = [
- {file = "asciitree-0.3.3.tar.gz", hash = "sha256:4aa4b9b649f85e3fcb343363d97564aa1fb62e249677f2e18a96765145cc0f6e"},
-]
-
-[[package]]
-name = "asttokens"
-version = "2.4.1"
-description = "Annotate AST trees with source code positions"
-optional = true
-python-versions = "*"
-files = [
- {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"},
- {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"},
-]
-
-[package.dependencies]
-six = ">=1.12.0"
-
-[package.extras]
-astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"]
-test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"]
-
-[[package]]
-name = "async-lru"
-version = "2.0.4"
-description = "Simple LRU cache for asyncio"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "async-lru-2.0.4.tar.gz", hash = "sha256:b8a59a5df60805ff63220b2a0c5b5393da5521b113cd5465a44eb037d81a5627"},
- {file = "async_lru-2.0.4-py3-none-any.whl", hash = "sha256:ff02944ce3c288c5be660c42dbcca0742b32c3b279d6dceda655190240b99224"},
-]
-
-[package.dependencies]
-typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
-
-[[package]]
-name = "async-timeout"
-version = "4.0.3"
-description = "Timeout context manager for asyncio programs"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
- {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
-]
-
-[[package]]
-name = "attrs"
-version = "24.2.0"
-description = "Classes Without Boilerplate"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
- {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
-]
-
-[package.extras]
-benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
-tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
-
-[[package]]
-name = "babel"
-version = "2.16.0"
-description = "Internationalization utilities"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"},
- {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"},
-]
-
-[package.extras]
-dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
-
-[[package]]
-name = "beautifulsoup4"
-version = "4.12.3"
-description = "Screen-scraping library"
-optional = false
-python-versions = ">=3.6.0"
-files = [
- {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"},
- {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"},
-]
-
-[package.dependencies]
-soupsieve = ">1.2"
-
-[package.extras]
-cchardet = ["cchardet"]
-chardet = ["chardet"]
-charset-normalizer = ["charset-normalizer"]
-html5lib = ["html5lib"]
-lxml = ["lxml"]
-
-[[package]]
-name = "bleach"
-version = "6.1.0"
-description = "An easy safelist-based HTML-sanitizing tool."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"},
- {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"},
-]
-
-[package.dependencies]
-six = ">=1.9.0"
-webencodings = "*"
-
-[package.extras]
-css = ["tinycss2 (>=1.1.0,<1.3)"]
-
-[[package]]
-name = "blinker"
-version = "1.8.2"
-description = "Fast, simple object-to-object and broadcast signaling"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "blinker-1.8.2-py3-none-any.whl", hash = "sha256:1779309f71bf239144b9399d06ae925637cf6634cf6bd131104184531bf67c01"},
- {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"},
-]
-
-[[package]]
-name = "certifi"
-version = "2024.8.30"
-description = "Python package for providing Mozilla's CA Bundle."
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"},
- {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
-]
-
-[[package]]
-name = "cffi"
-version = "1.17.1"
-description = "Foreign Function Interface for Python calling C code."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
- {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
- {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
- {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
- {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
- {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
- {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
- {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
- {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
- {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
- {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
- {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
- {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
- {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
- {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
- {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
- {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
- {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
- {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
- {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
- {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
- {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
- {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
- {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
- {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
- {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
- {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
- {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
-]
-
-[package.dependencies]
-pycparser = "*"
-
-[[package]]
-name = "cfgv"
-version = "3.4.0"
-description = "Validate configuration and produce human readable error messages."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"},
- {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"},
-]
-
-[[package]]
-name = "charset-normalizer"
-version = "3.4.0"
-description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-optional = false
-python-versions = ">=3.7.0"
-files = [
- {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"},
- {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"},
- {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"},
- {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"},
- {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"},
- {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"},
- {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"},
- {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"},
- {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"},
- {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
-]
-
-[[package]]
-name = "chime"
-version = "0.7.0"
-description = "Python sound notifications made easy."
-optional = true
-python-versions = ">=3.6,<4.0"
-files = [
- {file = "chime-0.7.0-py3-none-any.whl", hash = "sha256:9626f8151cb008b1e0ffb7de6d1834b7013ba5fc4c4e3c9ba6e29dc9bf5feac6"},
- {file = "chime-0.7.0.tar.gz", hash = "sha256:ba4af8934ec8bd9a89a340b4433b2e500097b979823386432be7128e0b201f0d"},
-]
-
-[[package]]
-name = "click"
-version = "8.1.7"
-description = "Composable command line interface toolkit"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
- {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
-]
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[[package]]
-name = "cloudpickle"
-version = "3.0.0"
-description = "Pickler class to extend the standard pickle.Pickler functionality"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "cloudpickle-3.0.0-py3-none-any.whl", hash = "sha256:246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7"},
- {file = "cloudpickle-3.0.0.tar.gz", hash = "sha256:996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882"},
-]
-
-[[package]]
-name = "cma"
-version = "4.0.0"
-description = "CMA-ES, Covariance Matrix Adaptation Evolution Strategy for non-linear numerical optimization in Python"
-optional = true
-python-versions = "*"
-files = [
- {file = "cma-4.0.0-py3-none-any.whl", hash = "sha256:97b86ba1ac9f1cbb189a06c4d4a78f591f0878e5dd3e55c95e88e622e78c1a10"},
- {file = "cma-4.0.0.tar.gz", hash = "sha256:fd28ce56983bf2fca0e614189d60134ebb80bf604f070d1ea095ea4e856f13a5"},
-]
-
-[package.dependencies]
-numpy = "*"
-
-[package.extras]
-constrained-solution-tracking = ["moarchiving"]
-plotting = ["matplotlib"]
-
-[[package]]
-name = "cmake"
-version = "3.30.4"
-description = "CMake is an open-source, cross-platform family of tools designed to build, test and package software"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "cmake-3.30.4-py3-none-macosx_10_10_universal2.whl", hash = "sha256:8a1a30125213c3d44b81a1af0085ad1dcd77abc61bcdf330556e83898428198a"},
- {file = "cmake-3.30.4-py3-none-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9f69b3706ae93fa48762871bdc7cb759fbbbadb04452e5eab820537c35fabcb6"},
- {file = "cmake-3.30.4-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:969af8432a17168e5b88e2efba11e5e14b7ca38aa638975b7ce1b19044c5183f"},
- {file = "cmake-3.30.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a5929e21af39a3adf4058aea54aa2197198e06315ebad541dda0baf20e2b32b"},
- {file = "cmake-3.30.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9808d3744e57c6fd71d93e2ca95142d67578a13a8867f7e8b000f343799899f"},
- {file = "cmake-3.30.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0a223c62cfeebcb7b90f715c16bb2e83ee37e8c3e676efde83b094d62c278ec2"},
- {file = "cmake-3.30.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08466455fbac67287a4868819ae0e0ab16d60c02eb209ae5e6d70e0e35d0e601"},
- {file = "cmake-3.30.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b8a4b0e638ddbabd16cad8b053b5a66733ddaf652dc3d46d55b3887314022fe"},
- {file = "cmake-3.30.4-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:a8f3160cc2b362c0ba03d70300a36bca5a58e1f82c345f4f54a4da7f59b7b2b4"},
- {file = "cmake-3.30.4-py3-none-musllinux_1_1_i686.whl", hash = "sha256:13bd1afa2e9988973f18c2425823081a044929e80685731601f093ff673d2db7"},
- {file = "cmake-3.30.4-py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:d2ab1018a42e03cf2e843f9565bc2ff7465a1a66c1cbfaba30d494a5e26f763e"},
- {file = "cmake-3.30.4-py3-none-musllinux_1_1_s390x.whl", hash = "sha256:2d6367a438c11f0863c9cdea843acd09514e94534ce0d115bc8f7905aaff243d"},
- {file = "cmake-3.30.4-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:e4cc37735bdc7ba058abdddd3f94ac9dc32cae0f94ae68661565b39f64a9a22f"},
- {file = "cmake-3.30.4-py3-none-win32.whl", hash = "sha256:a08e9a987be5da69941f4a26dd7614fcbb5039394821fbcce9716c20a1571c0c"},
- {file = "cmake-3.30.4-py3-none-win_amd64.whl", hash = "sha256:2d128d0831924788c1e87d6ca9abe4594e2ccde718712b0fa2c8c3a99b0d1282"},
- {file = "cmake-3.30.4-py3-none-win_arm64.whl", hash = "sha256:2825874fb84bd9d05c40b1a4347366d9949c9f6bac7a9ace97ac7faf9d573b8b"},
- {file = "cmake-3.30.4.tar.gz", hash = "sha256:fedd88495e742a1316078c283c2b4c2eeac4c34eca3234401d28f09ee58a320f"},
-]
-
-[package.extras]
-test = ["coverage (>=4.2)", "pytest (>=3.0.3)", "pytest-cov (>=2.4.0)"]
-
-[[package]]
-name = "colorama"
-version = "0.4.6"
-description = "Cross-platform colored terminal text."
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
-files = [
- {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
- {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
-]
-
-[[package]]
-name = "comm"
-version = "0.2.2"
-description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"},
- {file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"},
-]
-
-[package.dependencies]
-traitlets = ">=4"
-
-[package.extras]
-test = ["pytest"]
-
-[[package]]
-name = "configargparse"
-version = "1.7"
-description = "A drop-in replacement for argparse that allows options to also be set via config files and/or environment variables."
-optional = true
-python-versions = ">=3.5"
-files = [
- {file = "ConfigArgParse-1.7-py3-none-any.whl", hash = "sha256:d249da6591465c6c26df64a9f73d2536e743be2f244eb3ebe61114af2f94f86b"},
- {file = "ConfigArgParse-1.7.tar.gz", hash = "sha256:e7067471884de5478c58a511e529f0f9bd1c66bfef1dea90935438d6c23306d1"},
-]
-
-[package.extras]
-test = ["PyYAML", "mock", "pytest"]
-yaml = ["PyYAML"]
-
-[[package]]
-name = "contourpy"
-version = "1.3.0"
-description = "Python library for calculating contours of 2D quadrilateral grids"
-optional = true
-python-versions = ">=3.9"
-files = [
- {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"},
- {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"},
- {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"},
- {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"},
- {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"},
- {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"},
- {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"},
- {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"},
- {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"},
- {file = "contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"},
- {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"},
- {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"},
- {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"},
- {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"},
- {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"},
- {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"},
- {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"},
- {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"},
- {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"},
- {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"},
- {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"},
- {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"},
- {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"},
- {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"},
- {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"},
- {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"},
- {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"},
- {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"},
- {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"},
- {file = "contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"},
- {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"},
- {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"},
- {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"},
- {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"},
- {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"},
- {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"},
- {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"},
- {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"},
- {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"},
- {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"},
- {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"},
- {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"},
- {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"},
- {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"},
- {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"},
- {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"},
- {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"},
- {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"},
- {file = "contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8"},
- {file = "contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c"},
- {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca"},
- {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f"},
- {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc"},
- {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2"},
- {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e"},
- {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800"},
- {file = "contourpy-1.3.0-cp39-cp39-win32.whl", hash = "sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5"},
- {file = "contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843"},
- {file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"},
- {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"},
- {file = "contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"},
- {file = "contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0"},
- {file = "contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102"},
- {file = "contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb"},
- {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"},
-]
-
-[package.dependencies]
-numpy = ">=1.23"
-
-[package.extras]
-bokeh = ["bokeh", "selenium"]
-docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"]
-mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"]
-test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
-test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"]
-
-[[package]]
-name = "coverage"
-version = "7.6.2"
-description = "Code coverage measurement for Python"
-optional = true
-python-versions = ">=3.9"
-files = [
- {file = "coverage-7.6.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9df1950fb92d49970cce38100d7e7293c84ed3606eaa16ea0b6bc27175bb667"},
- {file = "coverage-7.6.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:24500f4b0e03aab60ce575c85365beab64b44d4db837021e08339f61d1fbfe52"},
- {file = "coverage-7.6.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a663b180b6669c400b4630a24cc776f23a992d38ce7ae72ede2a397ce6b0f170"},
- {file = "coverage-7.6.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfde025e2793a22efe8c21f807d276bd1d6a4bcc5ba6f19dbdfc4e7a12160909"},
- {file = "coverage-7.6.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:087932079c065d7b8ebadd3a0160656c55954144af6439886c8bcf78bbbcde7f"},
- {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9c6b0c1cafd96213a0327cf680acb39f70e452caf8e9a25aeb05316db9c07f89"},
- {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6e85830eed5b5263ffa0c62428e43cb844296f3b4461f09e4bdb0d44ec190bc2"},
- {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:62ab4231c01e156ece1b3a187c87173f31cbeee83a5e1f6dff17f288dca93345"},
- {file = "coverage-7.6.2-cp310-cp310-win32.whl", hash = "sha256:7b80fbb0da3aebde102a37ef0138aeedff45997e22f8962e5f16ae1742852676"},
- {file = "coverage-7.6.2-cp310-cp310-win_amd64.whl", hash = "sha256:d20c3d1f31f14d6962a4e2f549c21d31e670b90f777ef4171be540fb7fb70f02"},
- {file = "coverage-7.6.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb21bac7783c1bf6f4bbe68b1e0ff0d20e7e7732cfb7995bc8d96e23aa90fc7b"},
- {file = "coverage-7.6.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a7b2e437fbd8fae5bc7716b9c7ff97aecc95f0b4d56e4ca08b3c8d8adcaadb84"},
- {file = "coverage-7.6.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:536f77f2bf5797983652d1d55f1a7272a29afcc89e3ae51caa99b2db4e89d658"},
- {file = "coverage-7.6.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f361296ca7054f0936b02525646b2731b32c8074ba6defab524b79b2b7eeac72"},
- {file = "coverage-7.6.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7926d8d034e06b479797c199747dd774d5e86179f2ce44294423327a88d66ca7"},
- {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0bbae11c138585c89fb4e991faefb174a80112e1a7557d507aaa07675c62e66b"},
- {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fcad7d5d2bbfeae1026b395036a8aa5abf67e8038ae7e6a25c7d0f88b10a8e6a"},
- {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f01e53575f27097d75d42de33b1b289c74b16891ce576d767ad8c48d17aeb5e0"},
- {file = "coverage-7.6.2-cp311-cp311-win32.whl", hash = "sha256:7781f4f70c9b0b39e1b129b10c7d43a4e0c91f90c60435e6da8288efc2b73438"},
- {file = "coverage-7.6.2-cp311-cp311-win_amd64.whl", hash = "sha256:9bcd51eeca35a80e76dc5794a9dd7cb04b97f0e8af620d54711793bfc1fbba4b"},
- {file = "coverage-7.6.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ebc94fadbd4a3f4215993326a6a00e47d79889391f5659bf310f55fe5d9f581c"},
- {file = "coverage-7.6.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9681516288e3dcf0aa7c26231178cc0be6cac9705cac06709f2353c5b406cfea"},
- {file = "coverage-7.6.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d9c5d13927d77af4fbe453953810db766f75401e764727e73a6ee4f82527b3e"},
- {file = "coverage-7.6.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92f9ca04b3e719d69b02dc4a69debb795af84cb7afd09c5eb5d54b4a1ae2191"},
- {file = "coverage-7.6.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ff2ef83d6d0b527b5c9dad73819b24a2f76fdddcfd6c4e7a4d7e73ecb0656b4"},
- {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47ccb6e99a3031ffbbd6e7cc041e70770b4fe405370c66a54dbf26a500ded80b"},
- {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a867d26f06bcd047ef716175b2696b315cb7571ccb951006d61ca80bbc356e9e"},
- {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cdfcf2e914e2ba653101157458afd0ad92a16731eeba9a611b5cbb3e7124e74b"},
- {file = "coverage-7.6.2-cp312-cp312-win32.whl", hash = "sha256:f9035695dadfb397bee9eeaf1dc7fbeda483bf7664a7397a629846800ce6e276"},
- {file = "coverage-7.6.2-cp312-cp312-win_amd64.whl", hash = "sha256:5ed69befa9a9fc796fe015a7040c9398722d6b97df73a6b608e9e275fa0932b0"},
- {file = "coverage-7.6.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4eea60c79d36a8f39475b1af887663bc3ae4f31289cd216f514ce18d5938df40"},
- {file = "coverage-7.6.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa68a6cdbe1bc6793a9dbfc38302c11599bbe1837392ae9b1d238b9ef3dafcf1"},
- {file = "coverage-7.6.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ec528ae69f0a139690fad6deac8a7d33629fa61ccce693fdd07ddf7e9931fba"},
- {file = "coverage-7.6.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed5ac02126f74d190fa2cc14a9eb2a5d9837d5863920fa472b02eb1595cdc925"},
- {file = "coverage-7.6.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21c0ea0d4db8a36b275cb6fb2437a3715697a4ba3cb7b918d3525cc75f726304"},
- {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:35a51598f29b2a19e26d0908bd196f771a9b1c5d9a07bf20be0adf28f1ad4f77"},
- {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c9192925acc33e146864b8cf037e2ed32a91fdf7644ae875f5d46cd2ef086a5f"},
- {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf4eeecc9e10f5403ec06138978235af79c9a79af494eb6b1d60a50b49ed2869"},
- {file = "coverage-7.6.2-cp313-cp313-win32.whl", hash = "sha256:e4ee15b267d2dad3e8759ca441ad450c334f3733304c55210c2a44516e8d5530"},
- {file = "coverage-7.6.2-cp313-cp313-win_amd64.whl", hash = "sha256:c71965d1ced48bf97aab79fad56df82c566b4c498ffc09c2094605727c4b7e36"},
- {file = "coverage-7.6.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7571e8bbecc6ac066256f9de40365ff833553e2e0c0c004f4482facb131820ef"},
- {file = "coverage-7.6.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:078a87519057dacb5d77e333f740708ec2a8f768655f1db07f8dfd28d7a005f0"},
- {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e5e92e3e84a8718d2de36cd8387459cba9a4508337b8c5f450ce42b87a9e760"},
- {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ebabdf1c76593a09ee18c1a06cd3022919861365219ea3aca0247ededf6facd6"},
- {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12179eb0575b8900912711688e45474f04ab3934aaa7b624dea7b3c511ecc90f"},
- {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:39d3b964abfe1519b9d313ab28abf1d02faea26cd14b27f5283849bf59479ff5"},
- {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:84c4315577f7cd511d6250ffd0f695c825efe729f4205c0340f7004eda51191f"},
- {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ff797320dcbff57caa6b2301c3913784a010e13b1f6cf4ab3f563f3c5e7919db"},
- {file = "coverage-7.6.2-cp313-cp313t-win32.whl", hash = "sha256:2b636a301e53964550e2f3094484fa5a96e699db318d65398cfba438c5c92171"},
- {file = "coverage-7.6.2-cp313-cp313t-win_amd64.whl", hash = "sha256:d03a060ac1a08e10589c27d509bbdb35b65f2d7f3f8d81cf2fa199877c7bc58a"},
- {file = "coverage-7.6.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c37faddc8acd826cfc5e2392531aba734b229741d3daec7f4c777a8f0d4993e5"},
- {file = "coverage-7.6.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab31fdd643f162c467cfe6a86e9cb5f1965b632e5e65c072d90854ff486d02cf"},
- {file = "coverage-7.6.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97df87e1a20deb75ac7d920c812e9326096aa00a9a4b6d07679b4f1f14b06c90"},
- {file = "coverage-7.6.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343056c5e0737487a5291f5691f4dfeb25b3e3c8699b4d36b92bb0e586219d14"},
- {file = "coverage-7.6.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad4ef1c56b47b6b9024b939d503ab487231df1f722065a48f4fc61832130b90e"},
- {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fca4a92c8a7a73dee6946471bce6d1443d94155694b893b79e19ca2a540d86e"},
- {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69f251804e052fc46d29d0e7348cdc5fcbfc4861dc4a1ebedef7e78d241ad39e"},
- {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e8ea055b3ea046c0f66217af65bc193bbbeca1c8661dc5fd42698db5795d2627"},
- {file = "coverage-7.6.2-cp39-cp39-win32.whl", hash = "sha256:6c2ba1e0c24d8fae8f2cf0aeb2fc0a2a7f69b6d20bd8d3749fd6b36ecef5edf0"},
- {file = "coverage-7.6.2-cp39-cp39-win_amd64.whl", hash = "sha256:2186369a654a15628e9c1c9921409a6b3eda833e4b91f3ca2a7d9f77abb4987c"},
- {file = "coverage-7.6.2-pp39.pp310-none-any.whl", hash = "sha256:667952739daafe9616db19fbedbdb87917eee253ac4f31d70c7587f7ab531b4e"},
- {file = "coverage-7.6.2.tar.gz", hash = "sha256:a5f81e68aa62bc0cfca04f7b19eaa8f9c826b53fc82ab9e2121976dc74f131f3"},
-]
-
-[package.dependencies]
-tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
-
-[package.extras]
-toml = ["tomli"]
-
-[[package]]
-name = "cycler"
-version = "0.12.1"
-description = "Composable style cycles"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"},
- {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"},
-]
-
-[package.extras]
-docs = ["ipython", "matplotlib", "numpydoc", "sphinx"]
-tests = ["pytest", "pytest-cov", "pytest-xdist"]
-
-[[package]]
-name = "dash"
-version = "2.9.3"
-description = "A Python framework for building reactive web-apps. Developed by Plotly."
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "dash-2.9.3-py3-none-any.whl", hash = "sha256:a749ae1ea9de3fe7b785353a818ec9b629d39c6b7e02462954203bd1e296fd0e"},
- {file = "dash-2.9.3.tar.gz", hash = "sha256:47392f8d6455dc989a697407eb5941f3bad80604df985ab1ac9d4244568ffb34"},
-]
-
-[package.dependencies]
-dash-core-components = "2.0.0"
-dash-html-components = "2.0.0"
-dash-table = "5.0.0"
-Flask = ">=1.0.4"
-plotly = ">=5.0.0"
-
-[package.extras]
-celery = ["celery[redis] (>=5.1.2)", "importlib-metadata (<5)", "redis (>=3.5.3)"]
-ci = ["black (==21.6b0)", "black (==22.3.0)", "dash-dangerously-set-inner-html", "dash-flow-example (==0.0.5)", "flake8 (==3.9.2)", "flaky (==3.7.0)", "flask-talisman (==1.0.0)", "isort (==4.3.21)", "mimesis", "mock (==4.0.3)", "numpy", "openpyxl", "orjson (==3.5.4)", "orjson (==3.6.7)", "pandas (==1.1.5)", "pandas (>=1.4.0)", "preconditions", "pyarrow", "pyarrow (<3)", "pylint (==2.13.5)", "pytest-mock", "pytest-rerunfailures", "pytest-sugar (==0.9.6)", "xlrd (<2)", "xlrd (>=2.0.1)"]
-compress = ["flask-compress"]
-dev = ["PyYAML (>=5.4.1)", "coloredlogs (>=15.0.1)", "fire (>=0.4.0)"]
-diskcache = ["diskcache (>=5.2.1)", "multiprocess (>=0.70.12)", "psutil (>=5.8.0)"]
-testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", "dash-testing-stub (>=0.0.2)", "lxml (>=4.6.2)", "multiprocess (>=0.70.12)", "percy (>=2.0.2)", "psutil (>=5.8.0)", "pytest (>=6.0.2)", "requests[security] (>=2.21.0)", "selenium (>=3.141.0,<=4.2.0)", "waitress (>=1.4.4)"]
-
-[[package]]
-name = "dash-core-components"
-version = "2.0.0"
-description = "Core component suite for Dash"
-optional = true
-python-versions = "*"
-files = [
- {file = "dash_core_components-2.0.0-py3-none-any.whl", hash = "sha256:52b8e8cce13b18d0802ee3acbc5e888cb1248a04968f962d63d070400af2e346"},
- {file = "dash_core_components-2.0.0.tar.gz", hash = "sha256:c6733874af975e552f95a1398a16c2ee7df14ce43fa60bb3718a3c6e0b63ffee"},
-]
-
-[[package]]
-name = "dash-html-components"
-version = "2.0.0"
-description = "Vanilla HTML components for Dash"
-optional = true
-python-versions = "*"
-files = [
- {file = "dash_html_components-2.0.0-py3-none-any.whl", hash = "sha256:b42cc903713c9706af03b3f2548bda4be7307a7cf89b7d6eae3da872717d1b63"},
- {file = "dash_html_components-2.0.0.tar.gz", hash = "sha256:8703a601080f02619a6390998e0b3da4a5daabe97a1fd7a9cebc09d015f26e50"},
-]
-
-[[package]]
-name = "dash-table"
-version = "5.0.0"
-description = "Dash table"
-optional = true
-python-versions = "*"
-files = [
- {file = "dash_table-5.0.0-py3-none-any.whl", hash = "sha256:19036fa352bb1c11baf38068ec62d172f0515f73ca3276c79dee49b95ddc16c9"},
- {file = "dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308"},
-]
-
-[[package]]
-name = "datasets"
-version = "3.0.1"
-description = "HuggingFace community-driven open-source library of datasets"
-optional = false
-python-versions = ">=3.8.0"
-files = [
- {file = "datasets-3.0.1-py3-none-any.whl", hash = "sha256:db080aab41c8cc68645117a0f172e5c6789cbc672f066de0aa5a08fc3eebc686"},
- {file = "datasets-3.0.1.tar.gz", hash = "sha256:40d63b09e76a3066c32e746d6fdc36fd3f29ed2acd49bf5b1a2100da32936511"},
-]
-
-[package.dependencies]
-aiohttp = "*"
-dill = ">=0.3.0,<0.3.9"
-filelock = "*"
-fsspec = {version = ">=2023.1.0,<=2024.6.1", extras = ["http"]}
-huggingface-hub = ">=0.22.0"
-multiprocess = "*"
-numpy = ">=1.17"
-packaging = "*"
-pandas = "*"
-pyarrow = ">=15.0.0"
-pyyaml = ">=5.1"
-requests = ">=2.32.2"
-tqdm = ">=4.66.3"
-xxhash = "*"
-
-[package.extras]
-audio = ["librosa", "soundfile (>=0.12.1)", "soxr (>=0.4.0)"]
-benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"]
-dev = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.8.0.post1)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tensorflow (>=2.16.0)", "tensorflow (>=2.6.0)", "tensorflow (>=2.6.0)", "tiktoken", "torch", "torch (>=2.0.0)", "torchdata", "transformers", "transformers (>=4.42.0)", "zstandard"]
-docs = ["s3fs", "tensorflow (>=2.6.0)", "torch", "transformers"]
-jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"]
-quality = ["ruff (>=0.3.0)"]
-s3 = ["s3fs"]
-tensorflow = ["tensorflow (>=2.6.0)"]
-tensorflow-gpu = ["tensorflow (>=2.6.0)"]
-tests = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.8.0.post1)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tensorflow (>=2.16.0)", "tensorflow (>=2.6.0)", "tiktoken", "torch (>=2.0.0)", "torchdata", "transformers (>=4.42.0)", "zstandard"]
-tests-numpy2 = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tiktoken", "torch (>=2.0.0)", "torchdata", "transformers (>=4.42.0)", "zstandard"]
-torch = ["torch"]
-vision = ["Pillow (>=9.4.0)"]
-
-[[package]]
-name = "debugpy"
-version = "1.8.7"
-description = "An implementation of the Debug Adapter Protocol for Python"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "debugpy-1.8.7-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95fe04a573b8b22896c404365e03f4eda0ce0ba135b7667a1e57bd079793b96b"},
- {file = "debugpy-1.8.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:628a11f4b295ffb4141d8242a9bb52b77ad4a63a2ad19217a93be0f77f2c28c9"},
- {file = "debugpy-1.8.7-cp310-cp310-win32.whl", hash = "sha256:85ce9c1d0eebf622f86cc68618ad64bf66c4fc3197d88f74bb695a416837dd55"},
- {file = "debugpy-1.8.7-cp310-cp310-win_amd64.whl", hash = "sha256:29e1571c276d643757ea126d014abda081eb5ea4c851628b33de0c2b6245b037"},
- {file = "debugpy-1.8.7-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:caf528ff9e7308b74a1749c183d6808ffbedbb9fb6af78b033c28974d9b8831f"},
- {file = "debugpy-1.8.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cba1d078cf2e1e0b8402e6bda528bf8fda7ccd158c3dba6c012b7897747c41a0"},
- {file = "debugpy-1.8.7-cp311-cp311-win32.whl", hash = "sha256:171899588bcd412151e593bd40d9907133a7622cd6ecdbdb75f89d1551df13c2"},
- {file = "debugpy-1.8.7-cp311-cp311-win_amd64.whl", hash = "sha256:6e1c4ffb0c79f66e89dfd97944f335880f0d50ad29525dc792785384923e2211"},
- {file = "debugpy-1.8.7-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:4d27d842311353ede0ad572600c62e4bcd74f458ee01ab0dd3a1a4457e7e3706"},
- {file = "debugpy-1.8.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c1fd62ae0356e194f3e7b7a92acd931f71fe81c4b3be2c17a7b8a4b546ec2"},
- {file = "debugpy-1.8.7-cp312-cp312-win32.whl", hash = "sha256:2f729228430ef191c1e4df72a75ac94e9bf77413ce5f3f900018712c9da0aaca"},
- {file = "debugpy-1.8.7-cp312-cp312-win_amd64.whl", hash = "sha256:45c30aaefb3e1975e8a0258f5bbd26cd40cde9bfe71e9e5a7ac82e79bad64e39"},
- {file = "debugpy-1.8.7-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:d050a1ec7e925f514f0f6594a1e522580317da31fbda1af71d1530d6ea1f2b40"},
- {file = "debugpy-1.8.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f4349a28e3228a42958f8ddaa6333d6f8282d5edaea456070e48609c5983b7"},
- {file = "debugpy-1.8.7-cp313-cp313-win32.whl", hash = "sha256:11ad72eb9ddb436afb8337891a986302e14944f0f755fd94e90d0d71e9100bba"},
- {file = "debugpy-1.8.7-cp313-cp313-win_amd64.whl", hash = "sha256:2efb84d6789352d7950b03d7f866e6d180284bc02c7e12cb37b489b7083d81aa"},
- {file = "debugpy-1.8.7-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:4b908291a1d051ef3331484de8e959ef3e66f12b5e610c203b5b75d2725613a7"},
- {file = "debugpy-1.8.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da8df5b89a41f1fd31503b179d0a84a5fdb752dddd5b5388dbd1ae23cda31ce9"},
- {file = "debugpy-1.8.7-cp38-cp38-win32.whl", hash = "sha256:b12515e04720e9e5c2216cc7086d0edadf25d7ab7e3564ec8b4521cf111b4f8c"},
- {file = "debugpy-1.8.7-cp38-cp38-win_amd64.whl", hash = "sha256:93176e7672551cb5281577cdb62c63aadc87ec036f0c6a486f0ded337c504596"},
- {file = "debugpy-1.8.7-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:90d93e4f2db442f8222dec5ec55ccfc8005821028982f1968ebf551d32b28907"},
- {file = "debugpy-1.8.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6db2a370e2700557a976eaadb16243ec9c91bd46f1b3bb15376d7aaa7632c81"},
- {file = "debugpy-1.8.7-cp39-cp39-win32.whl", hash = "sha256:a6cf2510740e0c0b4a40330640e4b454f928c7b99b0c9dbf48b11efba08a8cda"},
- {file = "debugpy-1.8.7-cp39-cp39-win_amd64.whl", hash = "sha256:6a9d9d6d31846d8e34f52987ee0f1a904c7baa4912bf4843ab39dadf9b8f3e0d"},
- {file = "debugpy-1.8.7-py2.py3-none-any.whl", hash = "sha256:57b00de1c8d2c84a61b90880f7e5b6deaf4c312ecbde3a0e8912f2a56c4ac9ae"},
- {file = "debugpy-1.8.7.zip", hash = "sha256:18b8f731ed3e2e1df8e9cdaa23fb1fc9c24e570cd0081625308ec51c82efe42e"},
-]
-
-[[package]]
-name = "decorator"
-version = "5.1.1"
-description = "Decorators for Humans"
-optional = true
-python-versions = ">=3.5"
-files = [
- {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"},
- {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"},
-]
-
-[[package]]
-name = "deepdiff"
-version = "8.0.1"
-description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "deepdiff-8.0.1-py3-none-any.whl", hash = "sha256:42e99004ce603f9a53934c634a57b04ad5900e0d8ed0abb15e635767489cbc05"},
- {file = "deepdiff-8.0.1.tar.gz", hash = "sha256:245599a4586ab59bb599ca3517a9c42f3318ff600ded5e80a3432693c8ec3c4b"},
-]
-
-[package.dependencies]
-orderly-set = "5.2.2"
-
-[package.extras]
-cli = ["click (==8.1.7)", "pyyaml (==6.0.1)"]
-optimize = ["orjson"]
-
-[[package]]
-name = "defusedxml"
-version = "0.7.1"
-description = "XML bomb protection for Python stdlib modules"
-optional = true
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-files = [
- {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
- {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
-]
-
-[[package]]
-name = "diffusers"
-version = "0.30.3"
-description = "State-of-the-art diffusion in PyTorch and JAX."
-optional = false
-python-versions = ">=3.8.0"
-files = [
- {file = "diffusers-0.30.3-py3-none-any.whl", hash = "sha256:1b70209e4d2c61223b96a7e13bc4d70869c8b0b68f54a35ce3a67fcf813edeee"},
- {file = "diffusers-0.30.3.tar.gz", hash = "sha256:67c5eb25d5b50bf0742624ef43fe0f6d1e1604f64aad3e8558469cbe89ecf72f"},
-]
-
-[package.dependencies]
-filelock = "*"
-huggingface-hub = ">=0.23.2"
-importlib-metadata = "*"
-numpy = "*"
-Pillow = "*"
-regex = "!=2019.12.17"
-requests = "*"
-safetensors = ">=0.3.1"
-
-[package.extras]
-dev = ["GitPython (<3.1.19)", "Jinja2", "accelerate (>=0.31.0)", "compel (==0.1.8)", "datasets", "flax (>=0.4.1)", "hf-doc-builder (>=0.3.0)", "invisible-watermark (>=0.2.0)", "isort (>=5.5.4)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "ruff (==0.1.5)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "torch (>=1.4)", "torchvision", "transformers (>=4.41.2)", "urllib3 (<=2.0.0)"]
-docs = ["hf-doc-builder (>=0.3.0)"]
-flax = ["flax (>=0.4.1)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)"]
-quality = ["hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<=2.0.0)"]
-test = ["GitPython (<3.1.19)", "Jinja2", "compel (==0.1.8)", "datasets", "invisible-watermark (>=0.2.0)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "torchvision", "transformers (>=4.41.2)"]
-torch = ["accelerate (>=0.31.0)", "torch (>=1.4)"]
-training = ["Jinja2", "accelerate (>=0.31.0)", "datasets", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "tensorboard"]
-
-[[package]]
-name = "dill"
-version = "0.3.8"
-description = "serialize all of Python"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"},
- {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"},
-]
-
-[package.extras]
-graph = ["objgraph (>=1.7.2)"]
-profile = ["gprof2dot (>=2022.7.29)"]
-
-[[package]]
-name = "distlib"
-version = "0.3.9"
-description = "Distribution utilities"
-optional = true
-python-versions = "*"
-files = [
- {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"},
- {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"},
-]
-
-[[package]]
-name = "dm-control"
-version = "1.0.14"
-description = "Continuous control environments and MuJoCo Python bindings."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "dm_control-1.0.14-py3-none-any.whl", hash = "sha256:883c63244a7ebf598700a97564ed19fffd3479ca79efd090aed881609cdb9fc6"},
- {file = "dm_control-1.0.14.tar.gz", hash = "sha256:def1ece747b6f175c581150826b50f1a6134086dab34f8f3fd2d088ea035cf3d"},
-]
-
-[package.dependencies]
-absl-py = ">=0.7.0"
-dm-env = "*"
-dm-tree = "!=0.1.2"
-glfw = "*"
-labmaze = "*"
-lxml = "*"
-mujoco = ">=2.3.7"
-numpy = ">=1.9.0"
-protobuf = ">=3.19.4"
-pyopengl = ">=3.1.4"
-pyparsing = ">=3.0.0"
-requests = "*"
-scipy = "*"
-setuptools = "!=50.0.0"
-tqdm = "*"
-
-[package.extras]
-hdf5 = ["h5py"]
-
-[[package]]
-name = "dm-env"
-version = "1.6"
-description = "A Python interface for Reinforcement Learning environments."
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "dm-env-1.6.tar.gz", hash = "sha256:a436eb1c654c39e0c986a516cee218bea7140b510fceff63f97eb4fcff3d93de"},
- {file = "dm_env-1.6-py3-none-any.whl", hash = "sha256:0eabb6759dd453b625e041032f7ae0c1e87d4eb61b6a96b9ca586483837abf29"},
-]
-
-[package.dependencies]
-absl-py = "*"
-dm-tree = "*"
-numpy = "*"
-
-[[package]]
-name = "dm-tree"
-version = "0.1.8"
-description = "Tree is a library for working with nested data structures."
-optional = true
-python-versions = "*"
-files = [
- {file = "dm-tree-0.1.8.tar.gz", hash = "sha256:0fcaabbb14e7980377439e7140bd05552739ca5e515ecb3119f234acee4b9430"},
- {file = "dm_tree-0.1.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:35cc164a79336bfcfafb47e5f297898359123bbd3330c1967f0c4994f9cf9f60"},
- {file = "dm_tree-0.1.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39070ba268c0491af9fe7a58644d99e8b4f2cde6e5884ba3380bddc84ed43d5f"},
- {file = "dm_tree-0.1.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2869228d9c619074de501a3c10dc7f07c75422f8fab36ecdcb859b6f1b1ec3ef"},
- {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d20f2faa3672b52e5013f4077117bfb99c4cfc0b445d3bde1584c34032b57436"},
- {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5483dca4d7eb1a0d65fe86d3b6a53ae717face83c1f17e0887b1a4a64ae5c410"},
- {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d7c26e431fc93cc7e0cba867eb000db6a05f6f2b25af11ac4e9dada88fc5bca"},
- {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d714371bb08839e4e5e29024fc95832d9affe129825ef38836b143028bd144"},
- {file = "dm_tree-0.1.8-cp310-cp310-win_amd64.whl", hash = "sha256:d40fa4106ca6edc66760246a08f500ec0c85ef55c762fb4a363f6ee739ba02ee"},
- {file = "dm_tree-0.1.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad16ceba90a56ec47cf45b21856d14962ac314787975ef786efb5e6e9ca75ec7"},
- {file = "dm_tree-0.1.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:803bfc53b4659f447ac694dbd04235f94a73ef7c1fd1e0df7c84ac41e0bc963b"},
- {file = "dm_tree-0.1.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:378cc8ad93c5fe3590f405a309980721f021c790ca1bdf9b15bb1d59daec57f5"},
- {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1607ce49aa42f010d1e5e616d92ce899d66835d4d8bea49679582435285515de"},
- {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343a4a4ebaa127451ff971254a4be4084eb4bdc0b2513c32b46f6f728fd03f9e"},
- {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa42a605d099ee7d41ba2b5fb75e21423951fd26e5d50583a00471238fb3021d"},
- {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b7764de0d855338abefc6e3ee9fe40d301668310aa3baea3f778ff051f4393"},
- {file = "dm_tree-0.1.8-cp311-cp311-win_amd64.whl", hash = "sha256:a5d819c38c03f0bb5b3b3703c60e4b170355a0fc6b5819325bf3d4ceb3ae7e80"},
- {file = "dm_tree-0.1.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea9e59e0451e7d29aece402d9f908f2e2a80922bcde2ebfd5dcb07750fcbfee8"},
- {file = "dm_tree-0.1.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:94d3f0826311f45ee19b75f5b48c99466e4218a0489e81c0f0167bda50cacf22"},
- {file = "dm_tree-0.1.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:435227cf3c5dc63f4de054cf3d00183790bd9ead4c3623138c74dde7f67f521b"},
- {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09964470f76a5201aff2e8f9b26842976de7889300676f927930f6285e256760"},
- {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75c5d528bb992981c20793b6b453e91560784215dffb8a5440ba999753c14ceb"},
- {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a94aba18a35457a1b5cd716fd7b46c5dafdc4cf7869b4bae665b91c4682a8e"},
- {file = "dm_tree-0.1.8-cp312-cp312-win_amd64.whl", hash = "sha256:96a548a406a6fb15fe58f6a30a57ff2f2aafbf25f05afab00c8f5e5977b6c715"},
- {file = "dm_tree-0.1.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8c60a7eadab64c2278861f56bca320b2720f163dca9d7558103c3b77f2416571"},
- {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af4b3d372f2477dcd89a6e717e4a575ca35ccc20cc4454a8a4b6f8838a00672d"},
- {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de287fabc464b8734be251e46e06aa9aa1001f34198da2b6ce07bd197172b9cb"},
- {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:054b461f8176f4bce7a21f7b1870f873a1ced3bdbe1282c816c550bb43c71fa6"},
- {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f7915660f59c09068e428613c480150180df1060561fd0d1470684ae7007bd1"},
- {file = "dm_tree-0.1.8-cp37-cp37m-win_amd64.whl", hash = "sha256:b9f89a454e98806b44fe9d40ec9eee61f848388f7e79ac2371a55679bd5a3ac6"},
- {file = "dm_tree-0.1.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0e9620ccf06393eb6b613b5e366469304622d4ea96ae6540b28a33840e6c89cf"},
- {file = "dm_tree-0.1.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b095ba4f8ca1ba19350fd53cf1f8f3eb0bd406aa28af64a6dfc86707b32a810a"},
- {file = "dm_tree-0.1.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b9bd9b9ccb59409d33d51d84b7668010c04c2af7d4a371632874c1ca356cff3d"},
- {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d3172394079a86c3a759179c65f64c48d1a42b89495fcf38976d11cc3bb952c"},
- {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1612fcaecd79023dbc6a6ae48d51a80beb5c385d6f3f6d71688e57bc8d07de8"},
- {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5c8c12e3fda754ef6af94161bacdaeda816d941995fac415d6855c6c386af68"},
- {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:694c3654cfd2a81552c08ec66bb5c4a3d48fa292b9a181880fb081c36c5b9134"},
- {file = "dm_tree-0.1.8-cp38-cp38-win_amd64.whl", hash = "sha256:bb2d109f42190225112da899b9f3d46d0d5f26aef501c61e43529fe9322530b5"},
- {file = "dm_tree-0.1.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d16e1f2a073604cfcc09f7131ae8d534674f43c3aef4c25742eae295bc60d04f"},
- {file = "dm_tree-0.1.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:250b692fb75f45f02e2f58fbef9ab338904ef334b90557565621fa251df267cf"},
- {file = "dm_tree-0.1.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81fce77f22a302d7a5968aebdf4efafef4def7ce96528719a354e6990dcd49c7"},
- {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7ac31b9aecccb2c6e1ab29706f6ded3eba0c2c69c770322c9c685929c3d6afb"},
- {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe962015b2fe1282892b28ebe962faed53c7f98d942da9a4625cbf27baef913"},
- {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c52cbf4f8b3dbd0beaedf44f69fa85eec5e9dede612e08035e06ada6ec9426"},
- {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:181c35521d480d0365f39300542cb6cd7fd2b77351bb43d7acfda15aef63b317"},
- {file = "dm_tree-0.1.8-cp39-cp39-win_amd64.whl", hash = "sha256:8ed3564abed97c806db122c2d3e1a2b64c74a63debe9903aad795167cc301368"},
-]
-
-[[package]]
-name = "docker-pycreds"
-version = "0.4.0"
-description = "Python bindings for the docker credentials store API"
-optional = false
-python-versions = "*"
-files = [
- {file = "docker-pycreds-0.4.0.tar.gz", hash = "sha256:6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4"},
- {file = "docker_pycreds-0.4.0-py2.py3-none-any.whl", hash = "sha256:7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49"},
-]
-
-[package.dependencies]
-six = ">=1.4.0"
-
-[[package]]
-name = "dora-rs"
-version = "0.3.6"
-description = "`dora` goal is to be a low latency, composable, and distributed data flow."
-optional = true
-python-versions = "*"
-files = [
- {file = "dora_rs-0.3.6-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c036d2d0792d8d6e0e9db1936ab5fd4c6d19e097f3fc259058733e526f94253a"},
- {file = "dora_rs-0.3.6-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:95036f6fcb5aeb7bba8a1f37d84c627eefe09af1db17e36bc19209e950652446"},
- {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b5ef774dbafbdf8bda56939c6475916b7ec8f4b0c57c5b80f1d46eb642f5d07"},
- {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:78656d3ae1282a142a5fed410ec3a6f725fdf8d9f9192ed673e336ea3b083e12"},
- {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:681e22c8ecb3b48d11cb9019f8a32d4ae1e353e20d4ce3a0f0eedd0ccbd95e5f"},
- {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4598572bab6f726ec41fabb43bf0f7e3cf8082ea0f6f8f4e57845a6c919f31b3"},
- {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:157fc1fed50946646f09df75c6d52198735a5973e53d252199bbb1c65e1594d2"},
- {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_28_armv7l.whl", hash = "sha256:7ae2724c181be10692c24fb8d9ce2a99a9afc57237332c3658e2ea6f4f33c091"},
- {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_28_i686.whl", hash = "sha256:3d324835f292edd81b962f8c0df44f7f47c0a6f8fe6f7d081951aeb1f5ba57d2"},
- {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:474c087b5e584293685a7d4837165b2ead96dc74fb435ae50d5fa0ac168a0de0"},
- {file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:297350f05f5f87a0bf647a1e5b4446728e5f800788c6bb28b462bcd167f1de7f"},
- {file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:b1870a8e30f0ac298d17fd546224348d13a648bcfa0cbc51dba7e5136c1af928"},
- {file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:182a189212d41be0c960fd3299bf6731af2e771f8858cfb1be7ebcc17d60a254"},
- {file = "dora_rs-0.3.6-cp37-abi3-win_amd64.whl", hash = "sha256:a8f9343073e3fbca6bff3f0a13e5d2feabbe841a985c49e4294f7c14eb747bb5"},
-]
-
-[package.dependencies]
-pyarrow = "*"
-
-[[package]]
-name = "drawnow"
-version = "0.72.5"
-description = "MATLAB-like drawnow"
-optional = true
-python-versions = "*"
-files = [
- {file = "drawnow-0.72.5-py3-none-any.whl", hash = "sha256:4ff83a8b15f61a781edaaa2a3e6b71e2c8fd948960f188b870def701afcfa0d5"},
- {file = "drawnow-0.72.5.tar.gz", hash = "sha256:9d1855605b2ec6ebc4e8a95201a7a0068eb1e2a5d1695cb1b7c462d660f32593"},
-]
-
-[package.dependencies]
-matplotlib = ">=1.5"
-
-[[package]]
-name = "dynamixel-sdk"
-version = "3.7.31"
-description = "Dynamixel SDK 3. python package"
-optional = true
-python-versions = "*"
-files = [
- {file = "dynamixel_sdk-3.7.31-py3-none-any.whl", hash = "sha256:74e8c112ca6b0b869b196dd8c6a44ffd5dd5c1a3cb9fe2030e9933922406b466"},
-]
-
-[package.dependencies]
-pyserial = "*"
-
-[[package]]
-name = "einops"
-version = "0.8.0"
-description = "A new flavour of deep learning operations"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "einops-0.8.0-py3-none-any.whl", hash = "sha256:9572fb63046264a862693b0a87088af3bdc8c068fde03de63453cbbde245465f"},
- {file = "einops-0.8.0.tar.gz", hash = "sha256:63486517fed345712a8385c100cb279108d9d47e6ae59099b07657e983deae85"},
-]
-
-[[package]]
-name = "evdev"
-version = "1.7.1"
-description = "Bindings to the Linux input handling subsystem"
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "evdev-1.7.1.tar.gz", hash = "sha256:0c72c370bda29d857e188d931019c32651a9c1ea977c08c8d939b1ced1637fde"},
-]
-
-[[package]]
-name = "exceptiongroup"
-version = "1.2.2"
-description = "Backport of PEP 654 (exception groups)"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
- {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
-]
-
-[package.extras]
-test = ["pytest (>=6)"]
-
-[[package]]
-name = "executing"
-version = "2.1.0"
-description = "Get the currently executing AST node of a frame, and other information"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"},
- {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"},
-]
-
-[package.extras]
-tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"]
-
-[[package]]
-name = "farama-notifications"
-version = "0.0.4"
-description = "Notifications for all Farama Foundation maintained libraries."
-optional = false
-python-versions = "*"
-files = [
- {file = "Farama-Notifications-0.0.4.tar.gz", hash = "sha256:13fceff2d14314cf80703c8266462ebf3733c7d165336eee998fc58e545efd18"},
- {file = "Farama_Notifications-0.0.4-py3-none-any.whl", hash = "sha256:14de931035a41961f7c056361dc7f980762a143d05791ef5794a751a2caf05ae"},
-]
-
-[[package]]
-name = "fasteners"
-version = "0.19"
-description = "A python package that provides useful locks"
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "fasteners-0.19-py3-none-any.whl", hash = "sha256:758819cb5d94cdedf4e836988b74de396ceacb8e2794d21f82d131fd9ee77237"},
- {file = "fasteners-0.19.tar.gz", hash = "sha256:b4f37c3ac52d8a445af3a66bce57b33b5e90b97c696b7b984f530cf8f0ded09c"},
-]
-
-[[package]]
-name = "fastjsonschema"
-version = "2.20.0"
-description = "Fastest Python implementation of JSON schema"
-optional = true
-python-versions = "*"
-files = [
- {file = "fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a"},
- {file = "fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23"},
-]
-
-[package.extras]
-devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"]
-
-[[package]]
-name = "feetech-servo-sdk"
-version = "1.0.0"
-description = "This is source code from official feetech repository"
-optional = true
-python-versions = "*"
-files = [
- {file = "feetech-servo-sdk-1.0.0.tar.gz", hash = "sha256:d4d3832e4b1b22a8222133a414db9f868224c2fb639426a1b11d96ddfe84e69c"},
-]
-
-[package.dependencies]
-pyserial = "*"
-
-[[package]]
-name = "filelock"
-version = "3.16.1"
-description = "A platform independent file lock."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"},
- {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"},
-]
-
-[package.extras]
-docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"]
-testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"]
-typing = ["typing-extensions (>=4.12.2)"]
-
-[[package]]
-name = "flask"
-version = "3.0.3"
-description = "A simple framework for building complex web applications."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "flask-3.0.3-py3-none-any.whl", hash = "sha256:34e815dfaa43340d1d15a5c3a02b8476004037eb4840b34910c6e21679d288f3"},
- {file = "flask-3.0.3.tar.gz", hash = "sha256:ceb27b0af3823ea2737928a4d99d125a06175b8512c445cbd9a9ce200ef76842"},
-]
-
-[package.dependencies]
-blinker = ">=1.6.2"
-click = ">=8.1.3"
-itsdangerous = ">=2.1.2"
-Jinja2 = ">=3.1.2"
-Werkzeug = ">=3.0.0"
-
-[package.extras]
-async = ["asgiref (>=3.2)"]
-dotenv = ["python-dotenv"]
-
-[[package]]
-name = "fonttools"
-version = "4.54.1"
-description = "Tools to manipulate font files"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "fonttools-4.54.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ed7ee041ff7b34cc62f07545e55e1468808691dddfd315d51dd82a6b37ddef2"},
- {file = "fonttools-4.54.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41bb0b250c8132b2fcac148e2e9198e62ff06f3cc472065dff839327945c5882"},
- {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7965af9b67dd546e52afcf2e38641b5be956d68c425bef2158e95af11d229f10"},
- {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278913a168f90d53378c20c23b80f4e599dca62fbffae4cc620c8eed476b723e"},
- {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0e88e3018ac809b9662615072dcd6b84dca4c2d991c6d66e1970a112503bba7e"},
- {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4817f0031206e637d1e685251ac61be64d1adef111060df84fdcbc6ab6c44"},
- {file = "fonttools-4.54.1-cp310-cp310-win32.whl", hash = "sha256:7e3b7d44e18c085fd8c16dcc6f1ad6c61b71ff463636fcb13df7b1b818bd0c02"},
- {file = "fonttools-4.54.1-cp310-cp310-win_amd64.whl", hash = "sha256:dd9cc95b8d6e27d01e1e1f1fae8559ef3c02c76317da650a19047f249acd519d"},
- {file = "fonttools-4.54.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5419771b64248484299fa77689d4f3aeed643ea6630b2ea750eeab219588ba20"},
- {file = "fonttools-4.54.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:301540e89cf4ce89d462eb23a89464fef50915255ece765d10eee8b2bf9d75b2"},
- {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ae5091547e74e7efecc3cbf8e75200bc92daaeb88e5433c5e3e95ea8ce5aa7"},
- {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82834962b3d7c5ca98cb56001c33cf20eb110ecf442725dc5fdf36d16ed1ab07"},
- {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d26732ae002cc3d2ecab04897bb02ae3f11f06dd7575d1df46acd2f7c012a8d8"},
- {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58974b4987b2a71ee08ade1e7f47f410c367cdfc5a94fabd599c88165f56213a"},
- {file = "fonttools-4.54.1-cp311-cp311-win32.whl", hash = "sha256:ab774fa225238986218a463f3fe151e04d8c25d7de09df7f0f5fce27b1243dbc"},
- {file = "fonttools-4.54.1-cp311-cp311-win_amd64.whl", hash = "sha256:07e005dc454eee1cc60105d6a29593459a06321c21897f769a281ff2d08939f6"},
- {file = "fonttools-4.54.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:54471032f7cb5fca694b5f1a0aaeba4af6e10ae989df408e0216f7fd6cdc405d"},
- {file = "fonttools-4.54.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fa92cb248e573daab8d032919623cc309c005086d743afb014c836636166f08"},
- {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a911591200114969befa7f2cb74ac148bce5a91df5645443371aba6d222e263"},
- {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93d458c8a6a354dc8b48fc78d66d2a8a90b941f7fec30e94c7ad9982b1fa6bab"},
- {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5eb2474a7c5be8a5331146758debb2669bf5635c021aee00fd7c353558fc659d"},
- {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9c563351ddc230725c4bdf7d9e1e92cbe6ae8553942bd1fb2b2ff0884e8b714"},
- {file = "fonttools-4.54.1-cp312-cp312-win32.whl", hash = "sha256:fdb062893fd6d47b527d39346e0c5578b7957dcea6d6a3b6794569370013d9ac"},
- {file = "fonttools-4.54.1-cp312-cp312-win_amd64.whl", hash = "sha256:e4564cf40cebcb53f3dc825e85910bf54835e8a8b6880d59e5159f0f325e637e"},
- {file = "fonttools-4.54.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6e37561751b017cf5c40fce0d90fd9e8274716de327ec4ffb0df957160be3bff"},
- {file = "fonttools-4.54.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:357cacb988a18aace66e5e55fe1247f2ee706e01debc4b1a20d77400354cddeb"},
- {file = "fonttools-4.54.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e953cc0bddc2beaf3a3c3b5dd9ab7554677da72dfaf46951e193c9653e515a"},
- {file = "fonttools-4.54.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58d29b9a294573d8319f16f2f79e42428ba9b6480442fa1836e4eb89c4d9d61c"},
- {file = "fonttools-4.54.1-cp313-cp313-win32.whl", hash = "sha256:9ef1b167e22709b46bf8168368b7b5d3efeaaa746c6d39661c1b4405b6352e58"},
- {file = "fonttools-4.54.1-cp313-cp313-win_amd64.whl", hash = "sha256:262705b1663f18c04250bd1242b0515d3bbae177bee7752be67c979b7d47f43d"},
- {file = "fonttools-4.54.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ed2f80ca07025551636c555dec2b755dd005e2ea8fbeb99fc5cdff319b70b23b"},
- {file = "fonttools-4.54.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9dc080e5a1c3b2656caff2ac2633d009b3a9ff7b5e93d0452f40cd76d3da3b3c"},
- {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d152d1be65652fc65e695e5619e0aa0982295a95a9b29b52b85775243c06556"},
- {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8583e563df41fdecef31b793b4dd3af8a9caa03397be648945ad32717a92885b"},
- {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d1d353ef198c422515a3e974a1e8d5b304cd54a4c2eebcae708e37cd9eeffb1"},
- {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fda582236fee135d4daeca056c8c88ec5f6f6d88a004a79b84a02547c8f57386"},
- {file = "fonttools-4.54.1-cp38-cp38-win32.whl", hash = "sha256:e7d82b9e56716ed32574ee106cabca80992e6bbdcf25a88d97d21f73a0aae664"},
- {file = "fonttools-4.54.1-cp38-cp38-win_amd64.whl", hash = "sha256:ada215fd079e23e060157aab12eba0d66704316547f334eee9ff26f8c0d7b8ab"},
- {file = "fonttools-4.54.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f5b8a096e649768c2f4233f947cf9737f8dbf8728b90e2771e2497c6e3d21d13"},
- {file = "fonttools-4.54.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e10d2e0a12e18f4e2dd031e1bf7c3d7017be5c8dbe524d07706179f355c5dac"},
- {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31c32d7d4b0958600eac75eaf524b7b7cb68d3a8c196635252b7a2c30d80e986"},
- {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c39287f5c8f4a0c5a55daf9eaf9ccd223ea59eed3f6d467133cc727d7b943a55"},
- {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a7a310c6e0471602fe3bf8efaf193d396ea561486aeaa7adc1f132e02d30c4b9"},
- {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d3b659d1029946f4ff9b6183984578041b520ce0f8fb7078bb37ec7445806b33"},
- {file = "fonttools-4.54.1-cp39-cp39-win32.whl", hash = "sha256:e96bc94c8cda58f577277d4a71f51c8e2129b8b36fd05adece6320dd3d57de8a"},
- {file = "fonttools-4.54.1-cp39-cp39-win_amd64.whl", hash = "sha256:e8a4b261c1ef91e7188a30571be6ad98d1c6d9fa2427244c545e2fa0a2494dd7"},
- {file = "fonttools-4.54.1-py3-none-any.whl", hash = "sha256:37cddd62d83dc4f72f7c3f3c2bcf2697e89a30efb152079896544a93907733bd"},
- {file = "fonttools-4.54.1.tar.gz", hash = "sha256:957f669d4922f92c171ba01bef7f29410668db09f6c02111e22b2bce446f3285"},
-]
-
-[package.extras]
-all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"]
-graphite = ["lz4 (>=1.7.4.2)"]
-interpolatable = ["munkres", "pycairo", "scipy"]
-lxml = ["lxml (>=4.0)"]
-pathops = ["skia-pathops (>=0.5.0)"]
-plot = ["matplotlib"]
-repacker = ["uharfbuzz (>=0.23.0)"]
-symfont = ["sympy"]
-type1 = ["xattr"]
-ufo = ["fs (>=2.2.0,<3)"]
-unicode = ["unicodedata2 (>=15.1.0)"]
-woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
-
-[[package]]
-name = "fqdn"
-version = "1.5.1"
-description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers"
-optional = true
-python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4"
-files = [
- {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"},
- {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"},
-]
-
-[[package]]
-name = "freetype-py"
-version = "2.5.1"
-description = "Freetype python bindings"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "freetype-py-2.5.1.zip", hash = "sha256:cfe2686a174d0dd3d71a9d8ee9bf6a2c23f5872385cf8ce9f24af83d076e2fbd"},
- {file = "freetype_py-2.5.1-py3-none-macosx_10_9_universal2.whl", hash = "sha256:d01ded2557694f06aa0413f3400c0c0b2b5ebcaabeef7aaf3d756be44f51e90b"},
- {file = "freetype_py-2.5.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d2f6b3d68496797da23204b3b9c4e77e67559c80390fc0dc8b3f454ae1cd819"},
- {file = "freetype_py-2.5.1-py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:289b443547e03a4f85302e3ac91376838e0d11636050166662a4f75e3087ed0b"},
- {file = "freetype_py-2.5.1-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:cd3bfdbb7e1a84818cfbc8025fca3096f4f2afcd5d4641184bf0a3a2e6f97bbf"},
- {file = "freetype_py-2.5.1-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:3c1aefc4f0d5b7425f014daccc5fdc7c6f914fb7d6a695cc684f1c09cd8c1660"},
- {file = "freetype_py-2.5.1-py3-none-win_amd64.whl", hash = "sha256:0b7f8e0342779f65ca13ef8bc103938366fecade23e6bb37cb671c2b8ad7f124"},
-]
-
-[[package]]
-name = "frozenlist"
-version = "1.4.1"
-description = "A list-like structure which implements collections.abc.MutableSequence"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"},
- {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"},
- {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"},
- {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"},
- {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"},
- {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"},
- {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"},
- {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"},
- {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"},
- {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"},
- {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"},
- {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"},
- {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"},
- {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"},
- {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"},
- {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"},
- {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"},
- {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"},
- {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"},
- {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"},
- {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"},
- {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"},
- {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"},
- {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"},
- {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"},
- {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"},
- {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"},
- {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"},
- {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"},
- {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"},
- {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"},
- {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"},
- {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"},
- {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"},
- {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"},
- {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"},
- {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"},
- {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"},
- {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"},
- {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"},
- {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"},
- {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"},
- {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"},
- {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"},
- {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"},
- {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"},
- {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"},
- {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"},
- {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"},
- {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"},
- {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"},
- {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"},
- {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"},
- {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"},
- {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"},
- {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"},
- {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"},
- {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"},
- {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"},
- {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"},
- {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"},
- {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"},
- {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"},
- {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"},
- {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"},
- {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"},
- {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"},
- {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"},
- {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"},
- {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"},
- {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"},
- {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"},
- {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"},
- {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"},
- {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"},
- {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"},
- {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"},
-]
-
-[[package]]
-name = "fsspec"
-version = "2024.6.1"
-description = "File-system specification"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"},
- {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"},
-]
-
-[package.dependencies]
-aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""}
-
-[package.extras]
-abfs = ["adlfs"]
-adl = ["adlfs"]
-arrow = ["pyarrow (>=1)"]
-dask = ["dask", "distributed"]
-dev = ["pre-commit", "ruff"]
-doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"]
-dropbox = ["dropbox", "dropboxdrivefs", "requests"]
-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
-fuse = ["fusepy"]
-gcs = ["gcsfs"]
-git = ["pygit2"]
-github = ["requests"]
-gs = ["gcsfs"]
-gui = ["panel"]
-hdfs = ["pyarrow (>=1)"]
-http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"]
-libarchive = ["libarchive-c"]
-oci = ["ocifs"]
-s3 = ["s3fs"]
-sftp = ["paramiko"]
-smb = ["smbprotocol"]
-ssh = ["paramiko"]
-test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"]
-test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"]
-test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"]
-tqdm = ["tqdm"]
-
-[[package]]
-name = "future"
-version = "1.0.0"
-description = "Clean single-source support for Python 3 and 2"
-optional = true
-python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
-files = [
- {file = "future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216"},
- {file = "future-1.0.0.tar.gz", hash = "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05"},
-]
-
-[[package]]
-name = "gdown"
-version = "5.2.0"
-description = "Google Drive Public File/Folder Downloader"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "gdown-5.2.0-py3-none-any.whl", hash = "sha256:33083832d82b1101bdd0e9df3edd0fbc0e1c5f14c9d8c38d2a35bf1683b526d6"},
- {file = "gdown-5.2.0.tar.gz", hash = "sha256:2145165062d85520a3cd98b356c9ed522c5e7984d408535409fd46f94defc787"},
-]
-
-[package.dependencies]
-beautifulsoup4 = "*"
-filelock = "*"
-requests = {version = "*", extras = ["socks"]}
-tqdm = "*"
-
-[package.extras]
-test = ["build", "mypy", "pytest", "pytest-xdist", "ruff", "twine", "types-requests", "types-setuptools"]
-
-[[package]]
-name = "gitdb"
-version = "4.0.11"
-description = "Git Object Database"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"},
- {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"},
-]
-
-[package.dependencies]
-smmap = ">=3.0.1,<6"
-
-[[package]]
-name = "gitpython"
-version = "3.1.43"
-description = "GitPython is a Python library used to interact with Git repositories"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"},
- {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"},
-]
-
-[package.dependencies]
-gitdb = ">=4.0.1,<5"
-
-[package.extras]
-doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"]
-test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"]
-
-[[package]]
-name = "glfw"
-version = "2.7.0"
-description = "A ctypes-based wrapper for GLFW3."
-optional = true
-python-versions = "*"
-files = [
- {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-macosx_10_6_intel.whl", hash = "sha256:bd82849edcceda4e262bd1227afaa74b94f9f0731c1197863cd25c15bfc613fc"},
- {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-macosx_11_0_arm64.whl", hash = "sha256:56ea163c964bb0bc336def2d6a6a1bd42f9db4b870ef834ac77d7b7ee68b8dfc"},
- {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2010_i686.whl", hash = "sha256:463aab9e5567c83d8120556b3a845807c60950ed0218fc1283368f46f5ece331"},
- {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2010_x86_64.whl", hash = "sha256:a6f54188dfc349e5426b0ada84843f6eb35a3811d8dbf57ae49c448e7d683bb4"},
- {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2014_aarch64.whl", hash = "sha256:e33568b0aba2045a3d7555f22fcf83fafcacc7c2fc4cb995741894ea51e43ab6"},
- {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2014_x86_64.whl", hash = "sha256:d8630dd9673860c427abde5b79bbc348e02eccde8a3f2a802c5a2a4fb5d79fb8"},
- {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-win32.whl", hash = "sha256:ff92d14ac1c7afa9c5deb495c335b485868709880e6e080e99ace7026d74c756"},
- {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-win_amd64.whl", hash = "sha256:20d4b31a5a6a61fb787b25f8408204e0e248313cc500953071d13d30a2e5cc9d"},
- {file = "glfw-2.7.0.tar.gz", hash = "sha256:0e209ad38fa8c5be67ca590d7b17533d95ad1eb57d0a3f07b98131db69b79000"},
-]
-
-[package.extras]
-preview = ["glfw-preview"]
-
-[[package]]
-name = "gym-aloha"
-version = "0.1.1"
-description = "A gym environment for ALOHA"
-optional = true
-python-versions = "<4.0,>=3.10"
-files = [
- {file = "gym_aloha-0.1.1-py3-none-any.whl", hash = "sha256:2698037246dbb106828f0bc229b61007b0a21d5967c72cc373f7bc1083203584"},
- {file = "gym_aloha-0.1.1.tar.gz", hash = "sha256:614ae1cf116323e7b5ae2f0e9bd282c4f052aee15e839e5587ddce45995359bc"},
-]
-
-[package.dependencies]
-dm-control = ">=1.0.14"
-gymnasium = ">=0.29.1"
-imageio = {version = ">=2.34.0", extras = ["ffmpeg"]}
-mujoco = ">=2.3.7,<3.0.0"
-
-[package.extras]
-dev = ["debugpy (>=1.8.1)", "pre-commit (>=3.7.0)"]
-test = ["pytest (>=8.1.0)", "pytest-cov (>=5.0.0)"]
-
-[[package]]
-name = "gym-dora"
-version = "0.1.0"
-description = ""
-optional = true
-python-versions = "^3.10"
-files = []
-develop = false
-
-[package.dependencies]
-dora-rs = ">=0.3.4"
-gymnasium = ">=0.29.1"
-pyarrow = ">=12.0.0"
-
-[package.source]
-type = "git"
-url = "https://github.com/dora-rs/dora-lerobot.git"
-reference = "HEAD"
-resolved_reference = "7844fbdb97d467a4672be3eb102ebca96211e95b"
-subdirectory = "gym_dora"
-
-[[package]]
-name = "gym-pusht"
-version = "0.1.5"
-description = "A gymnasium environment for PushT."
-optional = true
-python-versions = "<4.0,>=3.10"
-files = [
- {file = "gym_pusht-0.1.5-py3-none-any.whl", hash = "sha256:d9e3ba5f44916dc4a802d71764b08f4e7e09bda256e25af9dda16e9364dc777f"},
- {file = "gym_pusht-0.1.5.tar.gz", hash = "sha256:981e135f6e0ca91e4ec63603e9551bc77cba989d06a2888ed31a1d68f7cbdae2"},
-]
-
-[package.dependencies]
-gymnasium = ">=0.29.1"
-opencv-python = ">=4.9.0"
-pygame = ">=2.5.2"
-pymunk = ">=6.6.0"
-scikit-image = ">=0.22.0"
-shapely = ">=2.0.3"
-
-[package.extras]
-dev = ["debugpy (>=1.8.1)", "pre-commit (>=3.7.0)"]
-test = ["pytest (>=8.1.0)", "pytest-cov (>=5.0.0)"]
-
-[[package]]
-name = "gym-xarm"
-version = "0.1.1"
-description = "A gym environment for xArm"
-optional = true
-python-versions = "<4.0,>=3.10"
-files = [
- {file = "gym_xarm-0.1.1-py3-none-any.whl", hash = "sha256:3bd7e3c1c5521ba80a56536f01a5e11321580704d72160355ce47a828a8808ad"},
- {file = "gym_xarm-0.1.1.tar.gz", hash = "sha256:e455524561b02d06b92a4f7d524f448d84a7484d9a2dbc78600e3c66240e0fb7"},
-]
-
-[package.dependencies]
-gymnasium = ">=0.29.1"
-gymnasium-robotics = ">=1.2.4"
-mujoco = ">=2.3.7,<3.0.0"
-
-[package.extras]
-dev = ["debugpy (>=1.8.1)", "pre-commit (>=3.7.0)"]
-test = ["pytest (>=8.1.0)", "pytest-cov (>=5.0.0)"]
-
-[[package]]
-name = "gymnasium"
-version = "0.29.1"
-description = "A standard API for reinforcement learning and a diverse set of reference environments (formerly Gym)."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "gymnasium-0.29.1-py3-none-any.whl", hash = "sha256:61c3384b5575985bb7f85e43213bcb40f36fcdff388cae6bc229304c71f2843e"},
- {file = "gymnasium-0.29.1.tar.gz", hash = "sha256:1a532752efcb7590478b1cc7aa04f608eb7a2fdad5570cd217b66b6a35274bb1"},
-]
-
-[package.dependencies]
-cloudpickle = ">=1.2.0"
-farama-notifications = ">=0.0.1"
-numpy = ">=1.21.0"
-typing-extensions = ">=4.3.0"
-
-[package.extras]
-accept-rom-license = ["autorom[accept-rom-license] (>=0.4.2,<0.5.0)"]
-all = ["box2d-py (==2.3.5)", "cython (<3)", "imageio (>=2.14.1)", "jax (>=0.4.0)", "jaxlib (>=0.4.0)", "lz4 (>=3.1.0)", "matplotlib (>=3.0)", "moviepy (>=1.0.0)", "mujoco (>=2.3.3)", "mujoco-py (>=2.1,<2.2)", "opencv-python (>=3.0)", "pygame (>=2.1.3)", "shimmy[atari] (>=0.1.0,<1.0)", "swig (==4.*)", "torch (>=1.0.0)"]
-atari = ["shimmy[atari] (>=0.1.0,<1.0)"]
-box2d = ["box2d-py (==2.3.5)", "pygame (>=2.1.3)", "swig (==4.*)"]
-classic-control = ["pygame (>=2.1.3)", "pygame (>=2.1.3)"]
-jax = ["jax (>=0.4.0)", "jaxlib (>=0.4.0)"]
-mujoco = ["imageio (>=2.14.1)", "mujoco (>=2.3.3)"]
-mujoco-py = ["cython (<3)", "cython (<3)", "mujoco-py (>=2.1,<2.2)", "mujoco-py (>=2.1,<2.2)"]
-other = ["lz4 (>=3.1.0)", "matplotlib (>=3.0)", "moviepy (>=1.0.0)", "opencv-python (>=3.0)", "torch (>=1.0.0)"]
-testing = ["pytest (==7.1.3)", "scipy (>=1.7.3)"]
-toy-text = ["pygame (>=2.1.3)", "pygame (>=2.1.3)"]
-
-[[package]]
-name = "gymnasium-robotics"
-version = "1.2.4"
-description = "Robotics environments for the Gymnasium repo."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "gymnasium-robotics-1.2.4.tar.gz", hash = "sha256:d304192b066f8b800599dfbe3d9d90bba9b761ee884472bdc4d05968a8bc61cb"},
- {file = "gymnasium_robotics-1.2.4-py3-none-any.whl", hash = "sha256:c2cb23e087ca0280ae6802837eb7b3a6d14e5bd24c00803ab09f015fcff3eef5"},
-]
-
-[package.dependencies]
-gymnasium = ">=0.26"
-imageio = "*"
-Jinja2 = ">=3.0.3"
-mujoco = ">=2.3.3,<3.0"
-numpy = ">=1.21.0"
-PettingZoo = ">=1.23.0"
-
-[package.extras]
-mujoco-py = ["cython (<3)", "mujoco-py (>=2.1,<2.2)"]
-testing = ["Jinja2 (>=3.0.3)", "PettingZoo (>=1.23.0)", "cython (<3)", "mujoco-py (>=2.1,<2.2)", "pytest (==7.0.1)"]
-
-[[package]]
-name = "h11"
-version = "0.14.0"
-description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
- {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
-]
-
-[[package]]
-name = "h5py"
-version = "3.12.1"
-description = "Read and write HDF5 files from Python"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "h5py-3.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f0f1a382cbf494679c07b4371f90c70391dedb027d517ac94fa2c05299dacda"},
- {file = "h5py-3.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cb65f619dfbdd15e662423e8d257780f9a66677eae5b4b3fc9dca70b5fd2d2a3"},
- {file = "h5py-3.12.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b15d8dbd912c97541312c0e07438864d27dbca857c5ad634de68110c6beb1c2"},
- {file = "h5py-3.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59685fe40d8c1fbbee088c88cd4da415a2f8bee5c270337dc5a1c4aa634e3307"},
- {file = "h5py-3.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:577d618d6b6dea3da07d13cc903ef9634cde5596b13e832476dd861aaf651f3e"},
- {file = "h5py-3.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ccd9006d92232727d23f784795191bfd02294a4f2ba68708825cb1da39511a93"},
- {file = "h5py-3.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ad8a76557880aed5234cfe7279805f4ab5ce16b17954606cca90d578d3e713ef"},
- {file = "h5py-3.12.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1473348139b885393125126258ae2d70753ef7e9cec8e7848434f385ae72069e"},
- {file = "h5py-3.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:018a4597f35092ae3fb28ee851fdc756d2b88c96336b8480e124ce1ac6fb9166"},
- {file = "h5py-3.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:3fdf95092d60e8130ba6ae0ef7a9bd4ade8edbe3569c13ebbaf39baefffc5ba4"},
- {file = "h5py-3.12.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:06a903a4e4e9e3ebbc8b548959c3c2552ca2d70dac14fcfa650d9261c66939ed"},
- {file = "h5py-3.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7b3b8f3b48717e46c6a790e3128d39c61ab595ae0a7237f06dfad6a3b51d5351"},
- {file = "h5py-3.12.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:050a4f2c9126054515169c49cb900949814987f0c7ae74c341b0c9f9b5056834"},
- {file = "h5py-3.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c4b41d1019322a5afc5082864dfd6359f8935ecd37c11ac0029be78c5d112c9"},
- {file = "h5py-3.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:e4d51919110a030913201422fb07987db4338eba5ec8c5a15d6fab8e03d443fc"},
- {file = "h5py-3.12.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:513171e90ed92236fc2ca363ce7a2fc6f2827375efcbb0cc7fbdd7fe11fecafc"},
- {file = "h5py-3.12.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:59400f88343b79655a242068a9c900001a34b63e3afb040bd7cdf717e440f653"},
- {file = "h5py-3.12.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e465aee0ec353949f0f46bf6c6f9790a2006af896cee7c178a8c3e5090aa32"},
- {file = "h5py-3.12.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba51c0c5e029bb5420a343586ff79d56e7455d496d18a30309616fdbeed1068f"},
- {file = "h5py-3.12.1-cp313-cp313-win_amd64.whl", hash = "sha256:52ab036c6c97055b85b2a242cb540ff9590bacfda0c03dd0cf0661b311f522f8"},
- {file = "h5py-3.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2b8dd64f127d8b324f5d2cd1c0fd6f68af69084e9e47d27efeb9e28e685af3e"},
- {file = "h5py-3.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4532c7e97fbef3d029735db8b6f5bf01222d9ece41e309b20d63cfaae2fb5c4d"},
- {file = "h5py-3.12.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fdf6d7936fa824acfa27305fe2d9f39968e539d831c5bae0e0d83ed521ad1ac"},
- {file = "h5py-3.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84342bffd1f82d4f036433e7039e241a243531a1d3acd7341b35ae58cdab05bf"},
- {file = "h5py-3.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:62be1fc0ef195891949b2c627ec06bc8e837ff62d5b911b6e42e38e0f20a897d"},
- {file = "h5py-3.12.1.tar.gz", hash = "sha256:326d70b53d31baa61f00b8aa5f95c2fcb9621a3ee8365d770c551a13dbbcbfdf"},
-]
-
-[package.dependencies]
-numpy = ">=1.19.3"
-
-[[package]]
-name = "hello-robot-stretch-body"
-version = "0.7.27"
-description = "Stretch Body low level Python API"
-optional = true
-python-versions = "*"
-files = [
- {file = "hello_robot_stretch_body-0.7.27-py3-none-any.whl", hash = "sha256:740e6abae4a0ba43b23ce7831129e3ef9356acd706ea73b5512873b04ba3c5f0"},
- {file = "hello_robot_stretch_body-0.7.27.tar.gz", hash = "sha256:dd289ea95f9df7be1306cbc26ac75037946db04f4f22503fc6e2741a57c68732"},
-]
-
-[package.dependencies]
-aioserial = "*"
-chime = "*"
-click = "*"
-cma = "*"
-colorama = "*"
-drawnow = "*"
-dynamixel-sdk = "*"
-filelock = "*"
-gitpython = "*"
-hello-robot-stretch-body-tools = ">=0.4.2"
-hello-robot-stretch-factory = ">=0.3.5"
-hello-robot-stretch-tool-share = ">=0.3.3"
-hello-robot-stretch-urdf = ">=0.0.19"
-inputs = "*"
-ipython = "*"
-jupyter = "*"
-matplotlib = "*"
-meshio = "*"
-nose = "*"
-numba = "*"
-numpy = ">=1.24"
-numpy-stl = "*"
-open3d = "*"
-opencv-contrib-python = "*"
-pandas = "*"
-pathlib = "*"
-pixel-ring = "*"
-psutil = "*"
-pyrealsense2 = "*"
-pyrender = "*"
-pyusb = "*"
-pyyaml = ">=5.1"
-renamed-opencv-python-inference-engine = {version = "*", markers = "python_version >= \"3.0.0\""}
-rplidar-roboticia = "*"
-scikit-image = "*"
-scipy = "*"
-snakeviz = "*"
-SpeechRecognition = "*"
-sympy = "*"
-transforms3d = ">=0.4.2"
-urchin = "*"
-urdf-parser-py = "*"
-
-[[package]]
-name = "hello-robot-stretch-body-tools"
-version = "0.7.13"
-description = "Stretch Body Tools"
-optional = true
-python-versions = "*"
-files = [
- {file = "hello_robot_stretch_body_tools-0.7.13-py3-none-any.whl", hash = "sha256:f12bd4ee40e48c11e68392e7fd91c3a752e87d44d864d1adb3998b30c0166e75"},
- {file = "hello_robot_stretch_body_tools-0.7.13.tar.gz", hash = "sha256:9ce65bfc9a53444b7622c3479ab45c6aa9369618eb3bf102ef1172474d1873b7"},
-]
-
-[package.dependencies]
-click = "*"
-cma = "*"
-colorama = "*"
-drawnow = "*"
-filelock = "*"
-gitpython = "*"
-inputs = "*"
-ipython = "*"
-matplotlib = "*"
-nose = "*"
-numpy = ">=1.24"
-open3d = "*"
-opencv-contrib-python = "*"
-packaging = "*"
-pandas = "*"
-pixel-ring = "*"
-pyaudio = "*"
-pyrealsense2 = "*"
-pyusb = "*"
-pyyaml = ">=5.1"
-rplidar-roboticia = "*"
-scikit-image = "*"
-scipy = "*"
-sh = "*"
-snakeviz = "*"
-SpeechRecognition = "*"
-sympy = "*"
-trimesh = "4.4.7"
-urchin = "*"
-xmltodict = "*"
-
-[[package]]
-name = "hello-robot-stretch-factory"
-version = "0.5.6"
-description = "Stretch Factory Tools"
-optional = true
-python-versions = "*"
-files = [
- {file = "hello-robot-stretch-factory-0.5.6.tar.gz", hash = "sha256:e2b060daf5eda699781cde96faf608b7ed3c234ac5b22317f028a69f889846de"},
- {file = "hello_robot_stretch_factory-0.5.6-py3-none-any.whl", hash = "sha256:09bb97bf1fc146855843af042684d1820d6b1775945dbc3e1cd44eff75be702f"},
-]
-
-[package.dependencies]
-future = "*"
-gitpython = "*"
-hello-robot-stretch-body = ">=0.4.26"
-pyserial = "*"
-python-xlib = "*"
-pyusb = "*"
-tabulate = "*"
-
-[[package]]
-name = "hello-robot-stretch-tool-share"
-version = "0.3.4"
-description = "Stretch end of arm tool interfaces"
-optional = true
-python-versions = "*"
-files = [
- {file = "hello_robot_stretch_tool_share-0.3.4-py3-none-any.whl", hash = "sha256:230d24f88a84cc983c019078911c579882d9c2c9e24129e5acbe1c756189a1d1"},
- {file = "hello_robot_stretch_tool_share-0.3.4.tar.gz", hash = "sha256:8e0a2cea088dcb50e41257aade5c6190964a0f1407f1f54f24d114ff31ecb2c6"},
-]
-
-[[package]]
-name = "hello-robot-stretch-urdf"
-version = "0.1.0"
-description = "Stretch URDF"
-optional = true
-python-versions = "*"
-files = [
- {file = "hello_robot_stretch_urdf-0.1.0-py3-none-any.whl", hash = "sha256:324f5ce0834b45b343e84bb8e8f5cbdd02f1315c6954856f0c68badb2b03e026"},
- {file = "hello_robot_stretch_urdf-0.1.0.tar.gz", hash = "sha256:51ed5984dbb6538e9f7cdc573b8a4a283118a13faaa06dc773c9bdda8bfe1034"},
-]
-
-[package.dependencies]
-urchin = "*"
-
-[[package]]
-name = "hf-transfer"
-version = "0.1.8"
-description = "Speed up file transfers with the Hugging Face Hub."
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "hf_transfer-0.1.8-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:70858f9e94286738ed300484a45beb5cfee6a7ddac4c5886f9c6fce7823ac5ab"},
- {file = "hf_transfer-0.1.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:38adc73f0a8526319d90f7cc5dc2d5e4bb66f487a513d94b98aa6725be732e4a"},
- {file = "hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44d2f0c08198d8d899fe9d66e86aee2dd844bd7ce33888f261373fcec81d2a54"},
- {file = "hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1de2a4ef36f9e60b3d3bec00193c0aafd75771709f2ca51b9b162373f5af3d32"},
- {file = "hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e319269e3606a5ff2979296841766649ac73598a4a8eee2a968f86c8071fea5a"},
- {file = "hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f6026cf3be6a53ea42f92172f60c1c0675baaa9073f865e671b661dde5fd157"},
- {file = "hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f865c33ada5bd3650c2b46e59979f2d7755c3f517f8d0facc78576a0c7d26406"},
- {file = "hf_transfer-0.1.8-cp310-none-win32.whl", hash = "sha256:2054730e8d8ed21917c64be7199e06424b2bd08df1c43a72766afaed7992f2d3"},
- {file = "hf_transfer-0.1.8-cp310-none-win_amd64.whl", hash = "sha256:2b4f1a9446ba31170b5b1eca4e916504d18378a6b5fe959896bdac8a736a5ecb"},
- {file = "hf_transfer-0.1.8-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:e27c15fcc5869ad7e52bbc0bdec6106b288d1c463f8d2da92f28615a3b181361"},
- {file = "hf_transfer-0.1.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:871a0032d011ebc6409a73a8406b98b84ff2cd3ed7d9e1af8cdf4d660b9fab9b"},
- {file = "hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:686fa756e1e0214bb6327d33c66732c52274d94a8460beb50604ad988b391cf6"},
- {file = "hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:36a03b1b2911b0cf15b1b9d971a34b32dadcc4f2fd979aaff5979d6ce4017c34"},
- {file = "hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:079db90c81f41f4cf3227dfaaa855a9b8e9aef45bc7c2be29ce7232cd83ff881"},
- {file = "hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac08a4524127fdd14c234d4bcbe49d1c498acf5335c781714823179bcc8dc039"},
- {file = "hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:837432e73cb17274a6782b6216e8ce058aa325a475dc44a5a6a753d48b86d18a"},
- {file = "hf_transfer-0.1.8-cp311-none-win32.whl", hash = "sha256:b180f9823dde35aba9bc0f1d0c04ac8a873baebd3732a7ffe4f11940abc7df0d"},
- {file = "hf_transfer-0.1.8-cp311-none-win_amd64.whl", hash = "sha256:37907d2135cebcf8b6d419bb575148d89c224f16b69357f027bd29d0e85c6529"},
- {file = "hf_transfer-0.1.8-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:baf948f4f493949309cbe60529620b9b0aef854a22b6e526753364acc57c09b6"},
- {file = "hf_transfer-0.1.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bce5c8bdefa478c5d5eaa646cc4ce1df5cfe764d98572ad0c6b8773e98d49f6"},
- {file = "hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54d6f8a1a86128d651a3799e1267c343d60f81f2c565d7c5416eb8e674e4cf0e"},
- {file = "hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f79fd1b0c2ed93efb4c5f684118d7a762ecdd218e170df8208c4e13d3dcd4959"},
- {file = "hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:414df35692670683bf5623498ef9d88a8df5d77e9516515da6e2b34d1054c11f"},
- {file = "hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c9798d5f951f66b96d40a7a53910260cb5874fda56cf5944dddb7c571f37ec3"},
- {file = "hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:060c661691f85a61392e57579c80eb64b5ee277434e81fb582f605c1c8ff05d5"},
- {file = "hf_transfer-0.1.8-cp312-none-win32.whl", hash = "sha256:f7840e32379820c3e1571a480238e05ea043e970c99d2e999578004a2eb17788"},
- {file = "hf_transfer-0.1.8-cp312-none-win_amd64.whl", hash = "sha256:9a3204ec423cc5e659872e8179f8704ad9ce2abb1e6a991f8838aedf1dc07830"},
- {file = "hf_transfer-0.1.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09949e86ad63ee139e463fd0dfaf401515ae70445854199f61d545514c65f744"},
- {file = "hf_transfer-0.1.8-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf1a74552845b93ea972e6e7131ef54e56056aa54137e93a40faf3fbcb2442ff"},
- {file = "hf_transfer-0.1.8-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959bcb3afb4ee6f2a07031a947dba98ec0b64c001bc914fbd8fc32e13a287162"},
- {file = "hf_transfer-0.1.8-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e01eecdb8162bd61dab9090fbd9f8034dd8b5755ef727a21ca8a057f80cb91ee"},
- {file = "hf_transfer-0.1.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50650a38e9d31f5ad8f010e4598bf304ecd99c17162e7d93f67e031571b864ee"},
- {file = "hf_transfer-0.1.8-cp37-none-win32.whl", hash = "sha256:e29b9d1d378138f2f4eae0e93ca94af3b5d45f4532eef69f1ab97fe06f9c9d9e"},
- {file = "hf_transfer-0.1.8-cp37-none-win_amd64.whl", hash = "sha256:cfd6cef43ae883103117a371f8ebae4e7f9637bc6fb480f1be5568e2fe22a8a7"},
- {file = "hf_transfer-0.1.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92a68f7a0043cca8a0de4decc760dca177530944cbab502afac503bd1b2fa01a"},
- {file = "hf_transfer-0.1.8-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e3138e408179f80a5480598e32f8e1abb564915cbde4d3bc8da52811c75dc3ea"},
- {file = "hf_transfer-0.1.8-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4544d148930ad34442d43b8fa911c8479c04a95b858b1d1f91e0b7da77082fad"},
- {file = "hf_transfer-0.1.8-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a851794b9f029965664f8c3002c957fccf21685e9397ceb4f9f19c986dee8ad3"},
- {file = "hf_transfer-0.1.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:791aaf87c5319ac83edb6ab2994b3db19924c49d6ff667dd3d8a610b455ff70a"},
- {file = "hf_transfer-0.1.8-cp38-none-win32.whl", hash = "sha256:8f71e5d35d3a3160dcca12fdcc8119033aeacaa6a32838a7ad9f9cb1008bbe58"},
- {file = "hf_transfer-0.1.8-cp38-none-win_amd64.whl", hash = "sha256:543287b4ceb1e25501580b99690f7f0df9d3631d29306f37cbd97e918c732944"},
- {file = "hf_transfer-0.1.8-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:7ce02a18bd0bb2343e707ac85b68c946bc37623ee24150c69158f6b2b2c7a98f"},
- {file = "hf_transfer-0.1.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:64d7f8dbd64ba183ed1df75d47c84e075ff666ceaa335bff1de16b09eaac5b80"},
- {file = "hf_transfer-0.1.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e7858694e11419ae27e542fb8fc0d0e54d46ff7768fe73bc359d70b8f5aa578"},
- {file = "hf_transfer-0.1.8-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed116cd9d1edfa32c0136d7cb8e5f1afd2b32df43c49085d428f108fc8e1c8f"},
- {file = "hf_transfer-0.1.8-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e385d0da9c6b3472ab29285d2d46c9f9903205b8d108f88a82f3f85aafae0ab"},
- {file = "hf_transfer-0.1.8-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98f75fa4b86ef15433cd907807ac77d1fb39d7e7b790bfd39c7ae9c385bf0200"},
- {file = "hf_transfer-0.1.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a63ad947d2901425ac0a3ed70c3696dfde27fadb0482ed763bdd5cc946b278"},
- {file = "hf_transfer-0.1.8-cp39-none-win32.whl", hash = "sha256:3e74096915813ae842ea6a5bdf10c0fef960aa51a35a560955b3e61cdfe3db57"},
- {file = "hf_transfer-0.1.8-cp39-none-win_amd64.whl", hash = "sha256:05ea16307bf4a5eb097cbc6e5057e4eb5e080a138af23ef639fd38857723c288"},
- {file = "hf_transfer-0.1.8-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:928ff036c3e98e10dcfbdb4fcdfc4592d37a5cc8e365a7ba8dfd4337e849d675"},
- {file = "hf_transfer-0.1.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d49ba3ce67035f460ae1924fe2feafec155cb535eec7f31ed5109c19064cd294"},
- {file = "hf_transfer-0.1.8-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b01f5872c62cfee3ec9ca5c738818296f69f8adf84b4d8d15f2a5601d9dda339"},
- {file = "hf_transfer-0.1.8-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:659d4212d50847a5165666bf43d67727679b4f694ef9c413613cc27093136527"},
- {file = "hf_transfer-0.1.8.tar.gz", hash = "sha256:26d229468152e7a3ec12664cac86b8c2800695fd85f9c9a96677a775cc04f0b3"},
-]
-
-[[package]]
-name = "httpcore"
-version = "1.0.6"
-description = "A minimal low-level HTTP client."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"},
- {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"},
-]
-
-[package.dependencies]
-certifi = "*"
-h11 = ">=0.13,<0.15"
-
-[package.extras]
-asyncio = ["anyio (>=4.0,<5.0)"]
-http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (==1.*)"]
-trio = ["trio (>=0.22.0,<1.0)"]
-
-[[package]]
-name = "httpx"
-version = "0.27.2"
-description = "The next generation HTTP client."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
- {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
-]
-
-[package.dependencies]
-anyio = "*"
-certifi = "*"
-httpcore = "==1.*"
-idna = "*"
-sniffio = "*"
-
-[package.extras]
-brotli = ["brotli", "brotlicffi"]
-cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
-http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (==1.*)"]
-zstd = ["zstandard (>=0.18.0)"]
-
-[[package]]
-name = "huggingface-hub"
-version = "0.25.2"
-description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
-optional = false
-python-versions = ">=3.8.0"
-files = [
- {file = "huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25"},
- {file = "huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c"},
-]
-
-[package.dependencies]
-filelock = "*"
-fsspec = ">=2023.5.0"
-hf-transfer = {version = ">=0.1.4", optional = true, markers = "extra == \"hf-transfer\""}
-InquirerPy = {version = "0.3.4", optional = true, markers = "extra == \"cli\""}
-packaging = ">=20.9"
-pyyaml = ">=5.1"
-requests = "*"
-tqdm = ">=4.42.1"
-typing-extensions = ">=3.7.4.3"
-
-[package.extras]
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
-cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
-fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
-hf-transfer = ["hf-transfer (>=0.1.4)"]
-inference = ["aiohttp", "minijinja (>=1.0)"]
-quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"]
-tensorflow = ["graphviz", "pydot", "tensorflow"]
-tensorflow-testing = ["keras (<3.0)", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
-torch = ["safetensors[torch]", "torch"]
-typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
-
-[[package]]
-name = "hydra-core"
-version = "1.3.2"
-description = "A framework for elegantly configuring complex applications"
-optional = false
-python-versions = "*"
-files = [
- {file = "hydra-core-1.3.2.tar.gz", hash = "sha256:8a878ed67216997c3e9d88a8e72e7b4767e81af37afb4ea3334b269a4390a824"},
- {file = "hydra_core-1.3.2-py3-none-any.whl", hash = "sha256:fa0238a9e31df3373b35b0bfb672c34cc92718d21f81311d8996a16de1141d8b"},
-]
-
-[package.dependencies]
-antlr4-python3-runtime = "==4.9.*"
-omegaconf = ">=2.2,<2.4"
-packaging = "*"
-
-[[package]]
-name = "identify"
-version = "2.6.1"
-description = "File identification library for Python"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"},
- {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"},
-]
-
-[package.extras]
-license = ["ukkonen"]
-
-[[package]]
-name = "idna"
-version = "3.10"
-description = "Internationalized Domain Names in Applications (IDNA)"
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
- {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
-]
-
-[package.extras]
-all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
-
-[[package]]
-name = "imagecodecs"
-version = "2024.9.22"
-description = "Image transformation, compression, and decompression codecs"
-optional = true
-python-versions = ">=3.9"
-files = [
- {file = "imagecodecs-2024.9.22-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:4cc21a59c6eb409bc3930dc642039eb1ff67a36b3f8d9e8c229eaede6b26557e"},
- {file = "imagecodecs-2024.9.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:321ff2e6907820bdbf8350d20733f5068bf53513476d522028117aefab55fc03"},
- {file = "imagecodecs-2024.9.22-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1608015c1e182e103d8b2ecda4a0e54595c3f846ca76fa484302283f24f3e7f"},
- {file = "imagecodecs-2024.9.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432e518d74ee5b9ac7d5b1022ed29a9fdabd0eab18201220e742fde631962cf8"},
- {file = "imagecodecs-2024.9.22-cp310-cp310-win32.whl", hash = "sha256:50d14caef565ccb4bdeb60e045b61f5d899d3caaf18e980923cdb50a181e4db2"},
- {file = "imagecodecs-2024.9.22-cp310-cp310-win_amd64.whl", hash = "sha256:d7220e9134c3abda5e9f720dcd810031b01b8ba1a71faa8055ab6b43b5056109"},
- {file = "imagecodecs-2024.9.22-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:47259f811aea089d7cdf369e6617cb336b67359835102a45ee2a49f2a8e20624"},
- {file = "imagecodecs-2024.9.22-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:52007be4bc809104e5660805725196255cc091c248e465f588f9b4506544b886"},
- {file = "imagecodecs-2024.9.22-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db9bcb5abd23522b119f619810cfa0217bf4756d1b8c1146a6a81635d7fb98d1"},
- {file = "imagecodecs-2024.9.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:606f3c31387aa9019007cdf7e5e3fcfc4d04fc158f56a8e94340018988f5af69"},
- {file = "imagecodecs-2024.9.22-cp311-cp311-win32.whl", hash = "sha256:180295983edbdd1220099ebe33718876d6cea6c68d9442a3771bba91de0be8c7"},
- {file = "imagecodecs-2024.9.22-cp311-cp311-win_amd64.whl", hash = "sha256:915397c69f986da92608ec4af331b9682ad933f3d645a4e9f7b106530e57683c"},
- {file = "imagecodecs-2024.9.22-cp311-cp311-win_arm64.whl", hash = "sha256:15e7b21488d50f95980b1f865983a6963dad1c752d51cef5bfa76bdd1a325935"},
- {file = "imagecodecs-2024.9.22-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:ba7e98ad714100ae892aeadea5dd636e31eb95663f7e71fb3654fc3399f8a312"},
- {file = "imagecodecs-2024.9.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d1b59ffeaf1fdc06c5da1b8faf34a5f74f914c55a7148060b1746f7684552b6f"},
- {file = "imagecodecs-2024.9.22-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9646cd9e8933c9a181387b159392d57832fb4f4b444f2d475a6ef7ba0ea8ef8"},
- {file = "imagecodecs-2024.9.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd9c62286c5aa9cdd73551c7e55c7db04424968304e53ec9240915edb9f30e23"},
- {file = "imagecodecs-2024.9.22-cp312-cp312-win32.whl", hash = "sha256:15959cf31ea8070741318fd0d5748b734e9001b83afd8bab6fe15236c27acba0"},
- {file = "imagecodecs-2024.9.22-cp312-cp312-win_amd64.whl", hash = "sha256:44d51f5aae669fe1eba1474144c042fbb56f4286c072f37aa86941fed865270a"},
- {file = "imagecodecs-2024.9.22-cp312-cp312-win_arm64.whl", hash = "sha256:aa5f47ebef13f4c55b1ac24fafef5e7b340963a6a73af9d2cef2f9bfdf58bf97"},
- {file = "imagecodecs-2024.9.22-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:d4bd89bc86c74439a7a828ce62e28d575db125f25cadc31bd877e2616ace2f0d"},
- {file = "imagecodecs-2024.9.22-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c8c37f8cdeedd0e01f55b9588e82b2c7059bc1a0167ed8dd05166cad674bfbde"},
- {file = "imagecodecs-2024.9.22-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9752c9af72ba372bbb0afca8a94f76b3096c1c54dcdb5cf18156fdc6b73403d2"},
- {file = "imagecodecs-2024.9.22-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9ddd053c7f262ca1333fc23f45ece7b375ddca31a0761c46e1197691e895bc3"},
- {file = "imagecodecs-2024.9.22-cp313-cp313-win32.whl", hash = "sha256:a5dc99af846febbaaf328f03518c2e2b0d0dfbe0a1a7b781361550605c7d4c58"},
- {file = "imagecodecs-2024.9.22-cp313-cp313-win_amd64.whl", hash = "sha256:c8951d3449f81aaf0664a8f575d431906134973f9bec93073dfc8d8247db0a1a"},
- {file = "imagecodecs-2024.9.22-cp313-cp313-win_arm64.whl", hash = "sha256:ead06b23300b9f1958026d103aafe8eba272ff40abcb8c5db02d7711a5992cc9"},
- {file = "imagecodecs-2024.9.22-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:fa72958dee65ce40e25f9536408b04f72a95004fe4630faa7042cf6c6c29a1d1"},
- {file = "imagecodecs-2024.9.22-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4518e0edb5b369415bb7016097ff9cd1b2aed7a9960e21d2e616cf7e066af3fe"},
- {file = "imagecodecs-2024.9.22-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fbbe6f5929838adc954acdd51820602d1dfd8235f8b3eb3764be58e76c6626b7"},
- {file = "imagecodecs-2024.9.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58dbee11a50f2bc2e8c81f3bc1887f1b1328d61f09d9d8caa2e4050ae635fbe9"},
- {file = "imagecodecs-2024.9.22-cp39-cp39-win32.whl", hash = "sha256:fcbbba54d0d61b6ca188d28695b244c4c5a9caaf848173015d81c91d3c0d47cb"},
- {file = "imagecodecs-2024.9.22-cp39-cp39-win_amd64.whl", hash = "sha256:3e55abc2934442fe3055b4f8943ebe8ff6c7eb57f9f895c80ca1732f38632d9f"},
- {file = "imagecodecs-2024.9.22-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ec3ce35e6131853beb8a39e47e59b183d034c6e9476fafda38c7ab4d8d17e1f4"},
- {file = "imagecodecs-2024.9.22.tar.gz", hash = "sha256:fea0801b4008d25e971918d991397a351bbe76276cfa98eed2de54cb87e894a3"},
-]
-
-[package.dependencies]
-numpy = "*"
-
-[package.extras]
-all = ["matplotlib", "numcodecs", "tifffile"]
-test = ["bitshuffle", "blosc", "blosc2", "czifile", "lz4", "numcodecs", "pyliblzfse", "pytest", "python-lzf", "python-snappy", "tifffile", "zarr (<3)", "zopflipy", "zstd"]
-
-[[package]]
-name = "imageio"
-version = "2.35.1"
-description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "imageio-2.35.1-py3-none-any.whl", hash = "sha256:6eb2e5244e7a16b85c10b5c2fe0f7bf961b40fcb9f1a9fd1bd1d2c2f8fb3cd65"},
- {file = "imageio-2.35.1.tar.gz", hash = "sha256:4952dfeef3c3947957f6d5dedb1f4ca31c6e509a476891062396834048aeed2a"},
-]
-
-[package.dependencies]
-imageio-ffmpeg = {version = "*", optional = true, markers = "extra == \"ffmpeg\""}
-numpy = "*"
-pillow = ">=8.3.2"
-psutil = {version = "*", optional = true, markers = "extra == \"ffmpeg\""}
-
-[package.extras]
-all-plugins = ["astropy", "av", "imageio-ffmpeg", "psutil", "tifffile"]
-all-plugins-pypy = ["av", "imageio-ffmpeg", "psutil", "tifffile"]
-build = ["wheel"]
-dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"]
-docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"]
-ffmpeg = ["imageio-ffmpeg", "psutil"]
-fits = ["astropy"]
-full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "itk", "numpy (>2)", "numpydoc", "pillow-heif", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "rawpy", "sphinx (<6)", "tifffile", "wheel"]
-gdal = ["gdal"]
-itk = ["itk"]
-linting = ["black", "flake8"]
-pillow-heif = ["pillow-heif"]
-pyav = ["av"]
-rawpy = ["numpy (>2)", "rawpy"]
-test = ["fsspec[github]", "pytest", "pytest-cov"]
-tifffile = ["tifffile"]
-
-[[package]]
-name = "imageio-ffmpeg"
-version = "0.5.1"
-description = "FFMPEG wrapper for Python"
-optional = false
-python-versions = ">=3.5"
-files = [
- {file = "imageio-ffmpeg-0.5.1.tar.gz", hash = "sha256:0ed7a9b31f560b0c9d929c5291cd430edeb9bed3ce9a497480e536dd4326484c"},
- {file = "imageio_ffmpeg-0.5.1-py3-none-macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:1460e84712b9d06910c1f7bb524096b0341d4b7844cea6c20e099d0a24e795b1"},
- {file = "imageio_ffmpeg-0.5.1-py3-none-manylinux2010_x86_64.whl", hash = "sha256:5289f75c7f755b499653f3209fea4efd1430cba0e39831c381aad2d458f7a316"},
- {file = "imageio_ffmpeg-0.5.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7fa9132a291d5eb28c44553550deb40cbdab831f2a614e55360301a6582eb205"},
- {file = "imageio_ffmpeg-0.5.1-py3-none-win32.whl", hash = "sha256:89efe2c79979d8174ba8476deb7f74d74c331caee3fb2b65ba2883bec0737625"},
- {file = "imageio_ffmpeg-0.5.1-py3-none-win_amd64.whl", hash = "sha256:1521e79e253bedbdd36a547e0cbd94a025ba0b558e17f08fea687d805a0e4698"},
-]
-
-[package.dependencies]
-setuptools = "*"
-
-[[package]]
-name = "importlib-metadata"
-version = "8.5.0"
-description = "Read metadata from Python packages"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"},
- {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"},
-]
-
-[package.dependencies]
-zipp = ">=3.20"
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=2.2)"]
-perf = ["ipython"]
-test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
-type = ["pytest-mypy"]
-
-[[package]]
-name = "iniconfig"
-version = "2.0.0"
-description = "brain-dead simple config-ini parsing"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
- {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
-]
-
-[[package]]
-name = "inputs"
-version = "0.5"
-description = "Cross-platform Python support for keyboards, mice and gamepads."
-optional = true
-python-versions = "*"
-files = [
- {file = "inputs-0.5-py2.py3-none-any.whl", hash = "sha256:13f894564e52134cf1e3862b1811da034875eb1f2b62e6021e3776e9669a96ec"},
- {file = "inputs-0.5.tar.gz", hash = "sha256:a31d5b96a3525f1232f326be9e7ce8ccaf873c6b1fb84d9f3c9bc3d79b23eae4"},
-]
-
-[[package]]
-name = "inquirerpy"
-version = "0.3.4"
-description = "Python port of Inquirer.js (A collection of common interactive command-line user interfaces)"
-optional = false
-python-versions = ">=3.7,<4.0"
-files = [
- {file = "InquirerPy-0.3.4-py3-none-any.whl", hash = "sha256:c65fdfbac1fa00e3ee4fb10679f4d3ed7a012abf4833910e63c295827fe2a7d4"},
- {file = "InquirerPy-0.3.4.tar.gz", hash = "sha256:89d2ada0111f337483cb41ae31073108b2ec1e618a49d7110b0d7ade89fc197e"},
-]
-
-[package.dependencies]
-pfzy = ">=0.3.1,<0.4.0"
-prompt-toolkit = ">=3.0.1,<4.0.0"
-
-[package.extras]
-docs = ["Sphinx (>=4.1.2,<5.0.0)", "furo (>=2021.8.17-beta.43,<2022.0.0)", "myst-parser (>=0.15.1,<0.16.0)", "sphinx-autobuild (>=2021.3.14,<2022.0.0)", "sphinx-copybutton (>=0.4.0,<0.5.0)"]
-
-[[package]]
-name = "ipykernel"
-version = "6.29.5"
-description = "IPython Kernel for Jupyter"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"},
- {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"},
-]
-
-[package.dependencies]
-appnope = {version = "*", markers = "platform_system == \"Darwin\""}
-comm = ">=0.1.1"
-debugpy = ">=1.6.5"
-ipython = ">=7.23.1"
-jupyter-client = ">=6.1.12"
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-matplotlib-inline = ">=0.1"
-nest-asyncio = "*"
-packaging = "*"
-psutil = "*"
-pyzmq = ">=24"
-tornado = ">=6.1"
-traitlets = ">=5.4.0"
-
-[package.extras]
-cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"]
-docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"]
-pyqt5 = ["pyqt5"]
-pyside6 = ["pyside6"]
-test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"]
-
-[[package]]
-name = "ipython"
-version = "8.28.0"
-description = "IPython: Productive Interactive Computing"
-optional = true
-python-versions = ">=3.10"
-files = [
- {file = "ipython-8.28.0-py3-none-any.whl", hash = "sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35"},
- {file = "ipython-8.28.0.tar.gz", hash = "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a"},
-]
-
-[package.dependencies]
-decorator = "*"
-exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
-jedi = ">=0.16"
-matplotlib-inline = "*"
-pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""}
-prompt-toolkit = ">=3.0.41,<3.1.0"
-pygments = ">=2.4.0"
-stack-data = "*"
-traitlets = ">=5.13.0"
-typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""}
-
-[package.extras]
-all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"]
-black = ["black"]
-doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli", "typing-extensions"]
-kernel = ["ipykernel"]
-matplotlib = ["matplotlib"]
-nbconvert = ["nbconvert"]
-nbformat = ["nbformat"]
-notebook = ["ipywidgets", "notebook"]
-parallel = ["ipyparallel"]
-qtconsole = ["qtconsole"]
-test = ["packaging", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"]
-test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"]
-
-[[package]]
-name = "ipywidgets"
-version = "8.1.5"
-description = "Jupyter interactive widgets"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "ipywidgets-8.1.5-py3-none-any.whl", hash = "sha256:3290f526f87ae6e77655555baba4f36681c555b8bdbbff430b70e52c34c86245"},
- {file = "ipywidgets-8.1.5.tar.gz", hash = "sha256:870e43b1a35656a80c18c9503bbf2d16802db1cb487eec6fab27d683381dde17"},
-]
-
-[package.dependencies]
-comm = ">=0.1.3"
-ipython = ">=6.1.0"
-jupyterlab-widgets = ">=3.0.12,<3.1.0"
-traitlets = ">=4.3.1"
-widgetsnbextension = ">=4.0.12,<4.1.0"
-
-[package.extras]
-test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"]
-
-[[package]]
-name = "isoduration"
-version = "20.11.0"
-description = "Operations with ISO 8601 durations"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"},
- {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"},
-]
-
-[package.dependencies]
-arrow = ">=0.15.0"
-
-[[package]]
-name = "itsdangerous"
-version = "2.2.0"
-description = "Safely pass data to untrusted environments and back."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"},
- {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"},
-]
-
-[[package]]
-name = "jedi"
-version = "0.19.1"
-description = "An autocompletion tool for Python that can be used for text editors."
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"},
- {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"},
-]
-
-[package.dependencies]
-parso = ">=0.8.3,<0.9.0"
-
-[package.extras]
-docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
-qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
-testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
-
-[[package]]
-name = "jinja2"
-version = "3.1.4"
-description = "A very fast and expressive template engine."
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"},
- {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"},
-]
-
-[package.dependencies]
-MarkupSafe = ">=2.0"
-
-[package.extras]
-i18n = ["Babel (>=2.7)"]
-
-[[package]]
-name = "json5"
-version = "0.9.25"
-description = "A Python implementation of the JSON5 data format."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "json5-0.9.25-py3-none-any.whl", hash = "sha256:34ed7d834b1341a86987ed52f3f76cd8ee184394906b6e22a1e0deb9ab294e8f"},
- {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"},
-]
-
-[[package]]
-name = "jsonlines"
-version = "4.0.0"
-description = "Library with helpers for the jsonlines file format"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "jsonlines-4.0.0-py3-none-any.whl", hash = "sha256:185b334ff2ca5a91362993f42e83588a360cf95ce4b71a73548502bda52a7c55"},
- {file = "jsonlines-4.0.0.tar.gz", hash = "sha256:0c6d2c09117550c089995247f605ae4cf77dd1533041d366351f6f298822ea74"},
-]
-
-[package.dependencies]
-attrs = ">=19.2.0"
-
-[[package]]
-name = "jsonpointer"
-version = "3.0.0"
-description = "Identify specific nodes in a JSON document (RFC 6901)"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"},
- {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"},
-]
-
-[[package]]
-name = "jsonschema"
-version = "4.23.0"
-description = "An implementation of JSON Schema validation for Python"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
- {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
-]
-
-[package.dependencies]
-attrs = ">=22.2.0"
-fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""}
-jsonschema-specifications = ">=2023.03.6"
-referencing = ">=0.28.4"
-rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""}
-rpds-py = ">=0.7.1"
-uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-webcolors = {version = ">=24.6.0", optional = true, markers = "extra == \"format-nongpl\""}
-
-[package.extras]
-format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
-format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]
-
-[[package]]
-name = "jsonschema-specifications"
-version = "2024.10.1"
-description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
-optional = true
-python-versions = ">=3.9"
-files = [
- {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"},
- {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"},
-]
-
-[package.dependencies]
-referencing = ">=0.31.0"
-
-[[package]]
-name = "jupyter"
-version = "1.1.1"
-description = "Jupyter metapackage. Install all the Jupyter components in one go."
-optional = true
-python-versions = "*"
-files = [
- {file = "jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83"},
- {file = "jupyter-1.1.1.tar.gz", hash = "sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a"},
-]
-
-[package.dependencies]
-ipykernel = "*"
-ipywidgets = "*"
-jupyter-console = "*"
-jupyterlab = "*"
-nbconvert = "*"
-notebook = "*"
-
-[[package]]
-name = "jupyter-client"
-version = "8.6.3"
-description = "Jupyter protocol implementation and client libraries"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"},
- {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"},
-]
-
-[package.dependencies]
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-python-dateutil = ">=2.8.2"
-pyzmq = ">=23.0"
-tornado = ">=6.2"
-traitlets = ">=5.3"
-
-[package.extras]
-docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
-test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"]
-
-[[package]]
-name = "jupyter-console"
-version = "6.6.3"
-description = "Jupyter terminal console"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485"},
- {file = "jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539"},
-]
-
-[package.dependencies]
-ipykernel = ">=6.14"
-ipython = "*"
-jupyter-client = ">=7.0.0"
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-prompt-toolkit = ">=3.0.30"
-pygments = "*"
-pyzmq = ">=17"
-traitlets = ">=5.4"
-
-[package.extras]
-test = ["flaky", "pexpect", "pytest"]
-
-[[package]]
-name = "jupyter-core"
-version = "5.7.2"
-description = "Jupyter core package. A base package on which Jupyter projects rely."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"},
- {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"},
-]
-
-[package.dependencies]
-platformdirs = ">=2.5"
-traitlets = ">=5.3"
-
-[package.extras]
-docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"]
-test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"]
-
-[[package]]
-name = "jupyter-events"
-version = "0.10.0"
-description = "Jupyter Event System library"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyter_events-0.10.0-py3-none-any.whl", hash = "sha256:4b72130875e59d57716d327ea70d3ebc3af1944d3717e5a498b8a06c6c159960"},
- {file = "jupyter_events-0.10.0.tar.gz", hash = "sha256:670b8229d3cc882ec782144ed22e0d29e1c2d639263f92ca8383e66682845e22"},
-]
-
-[package.dependencies]
-jsonschema = {version = ">=4.18.0", extras = ["format-nongpl"]}
-python-json-logger = ">=2.0.4"
-pyyaml = ">=5.3"
-referencing = "*"
-rfc3339-validator = "*"
-rfc3986-validator = ">=0.1.1"
-traitlets = ">=5.3"
-
-[package.extras]
-cli = ["click", "rich"]
-docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"]
-test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "rich"]
-
-[[package]]
-name = "jupyter-lsp"
-version = "2.2.5"
-description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyter-lsp-2.2.5.tar.gz", hash = "sha256:793147a05ad446f809fd53ef1cd19a9f5256fd0a2d6b7ce943a982cb4f545001"},
- {file = "jupyter_lsp-2.2.5-py3-none-any.whl", hash = "sha256:45fbddbd505f3fbfb0b6cb2f1bc5e15e83ab7c79cd6e89416b248cb3c00c11da"},
-]
-
-[package.dependencies]
-jupyter-server = ">=1.1.2"
-
-[[package]]
-name = "jupyter-server"
-version = "2.14.2"
-description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyter_server-2.14.2-py3-none-any.whl", hash = "sha256:47ff506127c2f7851a17bf4713434208fc490955d0e8632e95014a9a9afbeefd"},
- {file = "jupyter_server-2.14.2.tar.gz", hash = "sha256:66095021aa9638ced276c248b1d81862e4c50f292d575920bbe960de1c56b12b"},
-]
-
-[package.dependencies]
-anyio = ">=3.1.0"
-argon2-cffi = ">=21.1"
-jinja2 = ">=3.0.3"
-jupyter-client = ">=7.4.4"
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-jupyter-events = ">=0.9.0"
-jupyter-server-terminals = ">=0.4.4"
-nbconvert = ">=6.4.4"
-nbformat = ">=5.3.0"
-overrides = ">=5.0"
-packaging = ">=22.0"
-prometheus-client = ">=0.9"
-pywinpty = {version = ">=2.0.1", markers = "os_name == \"nt\""}
-pyzmq = ">=24"
-send2trash = ">=1.8.2"
-terminado = ">=0.8.3"
-tornado = ">=6.2.0"
-traitlets = ">=5.6.0"
-websocket-client = ">=1.7"
-
-[package.extras]
-docs = ["ipykernel", "jinja2", "jupyter-client", "myst-parser", "nbformat", "prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-openapi (>=0.8.0)", "sphinxcontrib-spelling", "sphinxemoji", "tornado", "typing-extensions"]
-test = ["flaky", "ipykernel", "pre-commit", "pytest (>=7.0,<9)", "pytest-console-scripts", "pytest-jupyter[server] (>=0.7)", "pytest-timeout", "requests"]
-
-[[package]]
-name = "jupyter-server-terminals"
-version = "0.5.3"
-description = "A Jupyter Server Extension Providing Terminals."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa"},
- {file = "jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269"},
-]
-
-[package.dependencies]
-pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""}
-terminado = ">=0.8.3"
-
-[package.extras]
-docs = ["jinja2", "jupyter-server", "mistune (<4.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"]
-test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"]
-
-[[package]]
-name = "jupyterlab"
-version = "4.2.5"
-description = "JupyterLab computational environment"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyterlab-4.2.5-py3-none-any.whl", hash = "sha256:73b6e0775d41a9fee7ee756c80f58a6bed4040869ccc21411dc559818874d321"},
- {file = "jupyterlab-4.2.5.tar.gz", hash = "sha256:ae7f3a1b8cb88b4f55009ce79fa7c06f99d70cd63601ee4aa91815d054f46f75"},
-]
-
-[package.dependencies]
-async-lru = ">=1.0.0"
-httpx = ">=0.25.0"
-ipykernel = ">=6.5.0"
-jinja2 = ">=3.0.3"
-jupyter-core = "*"
-jupyter-lsp = ">=2.0.0"
-jupyter-server = ">=2.4.0,<3"
-jupyterlab-server = ">=2.27.1,<3"
-notebook-shim = ">=0.2"
-packaging = "*"
-setuptools = ">=40.1.0"
-tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""}
-tornado = ">=6.2.0"
-traitlets = "*"
-
-[package.extras]
-dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.3.5)"]
-docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<7.3.0)", "sphinx-copybutton"]
-docs-screenshots = ["altair (==5.3.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.2)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.1.post2)", "matplotlib (==3.8.3)", "nbconvert (>=7.0.0)", "pandas (==2.2.1)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"]
-test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"]
-upgrade-extension = ["copier (>=9,<10)", "jinja2-time (<0.3)", "pydantic (<3.0)", "pyyaml-include (<3.0)", "tomli-w (<2.0)"]
-
-[[package]]
-name = "jupyterlab-pygments"
-version = "0.3.0"
-description = "Pygments theme using JupyterLab CSS variables"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"},
- {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"},
-]
-
-[[package]]
-name = "jupyterlab-server"
-version = "2.27.3"
-description = "A set of server components for JupyterLab and JupyterLab like applications."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "jupyterlab_server-2.27.3-py3-none-any.whl", hash = "sha256:e697488f66c3db49df675158a77b3b017520d772c6e1548c7d9bcc5df7944ee4"},
- {file = "jupyterlab_server-2.27.3.tar.gz", hash = "sha256:eb36caca59e74471988f0ae25c77945610b887f777255aa21f8065def9e51ed4"},
-]
-
-[package.dependencies]
-babel = ">=2.10"
-jinja2 = ">=3.0.3"
-json5 = ">=0.9.0"
-jsonschema = ">=4.18.0"
-jupyter-server = ">=1.21,<3"
-packaging = ">=21.3"
-requests = ">=2.31"
-
-[package.extras]
-docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinxcontrib-openapi (>0.8)"]
-openapi = ["openapi-core (>=0.18.0,<0.19.0)", "ruamel-yaml"]
-test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-validator (>=0.6.0,<0.8.0)", "pytest (>=7.0,<8)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "requests-mock", "ruamel-yaml", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"]
-
-[[package]]
-name = "jupyterlab-widgets"
-version = "3.0.13"
-description = "Jupyter interactive widgets for JupyterLab"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "jupyterlab_widgets-3.0.13-py3-none-any.whl", hash = "sha256:e3cda2c233ce144192f1e29914ad522b2f4c40e77214b0cc97377ca3d323db54"},
- {file = "jupyterlab_widgets-3.0.13.tar.gz", hash = "sha256:a2966d385328c1942b683a8cd96b89b8dd82c8b8f81dda902bb2bc06d46f5bed"},
-]
-
-[[package]]
-name = "kiwisolver"
-version = "1.4.7"
-description = "A fast implementation of the Cassowary constraint solver"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"},
- {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"},
- {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"},
- {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"},
- {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"},
- {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"},
- {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"},
- {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"},
- {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"},
- {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"},
- {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"},
- {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"},
- {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"},
- {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"},
- {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"},
- {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"},
- {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"},
- {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"},
- {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"},
- {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"},
- {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"},
- {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"},
- {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"},
- {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"},
- {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"},
- {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"},
- {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"},
- {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"},
- {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"},
- {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"},
- {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"},
- {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"},
- {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"},
- {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"},
- {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"},
- {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"},
- {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"},
- {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"},
- {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"},
- {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"},
- {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"},
- {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"},
- {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"},
- {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"},
- {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"},
- {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"},
- {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"},
- {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"},
- {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"},
- {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"},
- {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"},
- {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"},
- {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"},
- {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"},
- {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"},
- {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"},
- {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"},
- {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"},
- {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"},
- {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"},
- {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"},
- {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"},
- {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"},
- {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"},
- {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"},
- {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"},
- {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"},
- {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"},
- {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"},
- {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"},
- {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"},
- {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"},
- {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"},
- {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"},
- {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"},
- {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"},
- {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"},
- {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"},
- {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"},
- {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"},
- {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"},
- {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"},
- {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"},
- {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"},
- {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"},
- {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"},
- {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"},
- {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"},
- {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"},
- {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"},
- {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"},
- {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"},
- {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"},
- {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"},
- {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"},
- {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"},
- {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"},
- {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"},
- {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"},
- {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"},
- {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"},
- {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"},
- {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"},
- {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"},
- {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"},
- {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"},
- {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"},
- {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"},
- {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"},
- {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"},
- {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"},
- {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"},
- {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"},
- {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"},
-]
-
-[[package]]
-name = "labmaze"
-version = "1.0.6"
-description = "LabMaze: DeepMind Lab's text maze generator."
-optional = true
-python-versions = "*"
-files = [
- {file = "labmaze-1.0.6-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b2ddef976dfd8d992b19cfa6c633f2eba7576d759c2082da534e3f727479a84a"},
- {file = "labmaze-1.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:157efaa93228c8ccce5cae337902dd652093e0fba9d3a0f6506e4bee272bb66f"},
- {file = "labmaze-1.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3ce98b9541c5fe6a306e411e7d018121dd646f2c9978d763fad86f9f30c5f57"},
- {file = "labmaze-1.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e6433bd49bc541791de8191040526fddfebb77151620eb04203453f43ee486a"},
- {file = "labmaze-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:6a507fc35961f1b1479708e2716f65e0d0611cefb55f31a77be29ce2339b6fef"},
- {file = "labmaze-1.0.6-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a0c2cb9dec971814ea9c5d7150af15fa3964482131fa969e0afb94bd224348af"},
- {file = "labmaze-1.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2c6ba9538d819543f4be448d36b4926a3881e53646a2b331ebb5a1f353047d05"},
- {file = "labmaze-1.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70635d1cdb0147a02efb6b3f607a52cdc51723bc3dcc42717a0d4ef55fa0a987"},
- {file = "labmaze-1.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff472793238bd9b6dabea8094594d6074ad3c111455de3afcae72f6c40c6817e"},
- {file = "labmaze-1.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:2317e65e12fa3d1abecda7e0488dab15456cee8a2e717a586bfc8f02a91579e7"},
- {file = "labmaze-1.0.6-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e36b6fadcd78f22057b597c1c77823e806a0987b3bdfbf850e14b6b5b502075e"},
- {file = "labmaze-1.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d1a4f8de29c2c3d7f14163759b69cd3f237093b85334c983619c1db5403a223b"},
- {file = "labmaze-1.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a394f8bb857fcaa2884b809d63e750841c2662a106cfe8c045f2112d201ac7d5"},
- {file = "labmaze-1.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d17abb69d4dfc56183afb5c317e8b2eaca0587abb3aabd2326efd3143c81f4e"},
- {file = "labmaze-1.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:5af997598cc46b1929d1c5a1febc32fd56c75874fe481a2a5982c65cee8450c9"},
- {file = "labmaze-1.0.6-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:a4c5bc6e56baa55ce63b97569afec2f80cab0f6b952752a131e1f83eed190a53"},
- {file = "labmaze-1.0.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3955f24fe5f708e1e97495b4cfe284b70ae4fd51be5e17b75a6fc04ffbd67bca"},
- {file = "labmaze-1.0.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed96ddc0bb8d66df36428c94db83949fd84a15867e8250763a4c5e3d82104c54"},
- {file = "labmaze-1.0.6-cp37-cp37m-win_amd64.whl", hash = "sha256:3bd0458a29e55aa09f146e28a168d2e00b8ccf19e2259a3f71154cfff3536b1d"},
- {file = "labmaze-1.0.6-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:33f5154edc83dff55a150e54b60c8582fdafc7ec45195049809cbcc01f5e8f34"},
- {file = "labmaze-1.0.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0971055ef2a5f7d8517fdc42b67c057093698f1eb911f46faa7018867b73fcc9"},
- {file = "labmaze-1.0.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de18d09680007302abf49111f3fe822d8435e4fbc4468b9ec07d50a78e267865"},
- {file = "labmaze-1.0.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f18126066db2218a52853c7dd490b4c3d8129fc22eb3a47eb23007524b911d53"},
- {file = "labmaze-1.0.6-cp38-cp38-win_amd64.whl", hash = "sha256:f9aef09a76877342bb4d634b7e05f43b038a49c4f34adfb8f1b8ac57c29472f2"},
- {file = "labmaze-1.0.6-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5dd28899418f1b8b1c7d1e1b40a4593150a7cfa95ca91e23860b9785b82cc0ee"},
- {file = "labmaze-1.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:965569f37ee33090b4d4b3aa5aa7c9dcc4f62e2ae5d761e7f73ec76fc9d8aa96"},
- {file = "labmaze-1.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05eccfa98c0e781bc9f939076ae600b2e25ca736e123f2a530606aedec3b531c"},
- {file = "labmaze-1.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee8c94e0fb3fc2d8180214947245c1d74a3489349a9da90b868296e77a521e9"},
- {file = "labmaze-1.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:d486e9ca3a335ad628e3bd48a09c42f1aa5f51040952ef0fe32507afedcd694b"},
- {file = "labmaze-1.0.6.tar.gz", hash = "sha256:2e8de7094042a77d6972f1965cf5c9e8f971f1b34d225752f343190a825ebe73"},
-]
-
-[package.dependencies]
-absl-py = "*"
-numpy = ">=1.8.0"
-setuptools = "!=50.0.0"
-
-[[package]]
-name = "lazy-loader"
-version = "0.4"
-description = "Makes it easy to load subpackages and functions on demand."
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc"},
- {file = "lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1"},
-]
-
-[package.dependencies]
-packaging = "*"
-
-[package.extras]
-dev = ["changelist (==0.5)"]
-lint = ["pre-commit (==3.7.0)"]
-test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"]
-
-[[package]]
-name = "llvmlite"
-version = "0.43.0"
-description = "lightweight wrapper around basic LLVM functionality"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "llvmlite-0.43.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a289af9a1687c6cf463478f0fa8e8aa3b6fb813317b0d70bf1ed0759eab6f761"},
- {file = "llvmlite-0.43.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d4fd101f571a31acb1559ae1af30f30b1dc4b3186669f92ad780e17c81e91bc"},
- {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d434ec7e2ce3cc8f452d1cd9a28591745de022f931d67be688a737320dfcead"},
- {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6912a87782acdff6eb8bf01675ed01d60ca1f2551f8176a300a886f09e836a6a"},
- {file = "llvmlite-0.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:14f0e4bf2fd2d9a75a3534111e8ebeb08eda2f33e9bdd6dfa13282afacdde0ed"},
- {file = "llvmlite-0.43.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8d0618cb9bfe40ac38a9633f2493d4d4e9fcc2f438d39a4e854f39cc0f5f98"},
- {file = "llvmlite-0.43.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0a9a1a39d4bf3517f2af9d23d479b4175ead205c592ceeb8b89af48a327ea57"},
- {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1da416ab53e4f7f3bc8d4eeba36d801cc1894b9fbfbf2022b29b6bad34a7df2"},
- {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977525a1e5f4059316b183fb4fd34fa858c9eade31f165427a3977c95e3ee749"},
- {file = "llvmlite-0.43.0-cp311-cp311-win_amd64.whl", hash = "sha256:d5bd550001d26450bd90777736c69d68c487d17bf371438f975229b2b8241a91"},
- {file = "llvmlite-0.43.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f99b600aa7f65235a5a05d0b9a9f31150c390f31261f2a0ba678e26823ec38f7"},
- {file = "llvmlite-0.43.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:35d80d61d0cda2d767f72de99450766250560399edc309da16937b93d3b676e7"},
- {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eccce86bba940bae0d8d48ed925f21dbb813519169246e2ab292b5092aba121f"},
- {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6509e1507ca0760787a199d19439cc887bfd82226f5af746d6977bd9f66844"},
- {file = "llvmlite-0.43.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a2872ee80dcf6b5dbdc838763d26554c2a18aa833d31a2635bff16aafefb9c9"},
- {file = "llvmlite-0.43.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cd2a7376f7b3367019b664c21f0c61766219faa3b03731113ead75107f3b66c"},
- {file = "llvmlite-0.43.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18e9953c748b105668487b7c81a3e97b046d8abf95c4ddc0cd3c94f4e4651ae8"},
- {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74937acd22dc11b33946b67dca7680e6d103d6e90eeaaaf932603bec6fe7b03a"},
- {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9efc739cc6ed760f795806f67889923f7274276f0eb45092a1473e40d9b867"},
- {file = "llvmlite-0.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:47e147cdda9037f94b399bf03bfd8a6b6b1f2f90be94a454e3386f006455a9b4"},
- {file = "llvmlite-0.43.0.tar.gz", hash = "sha256:ae2b5b5c3ef67354824fb75517c8db5fbe93bc02cd9671f3c62271626bc041d5"},
-]
-
-[[package]]
-name = "lxml"
-version = "5.3.0"
-description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"},
- {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"},
- {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"},
- {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"},
- {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"},
- {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"},
- {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"},
- {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"},
- {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"},
- {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"},
- {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"},
- {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"},
- {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"},
- {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"},
- {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"},
- {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"},
- {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"},
- {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"},
- {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"},
- {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"},
- {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"},
- {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"},
- {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"},
- {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"},
- {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"},
- {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"},
- {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"},
- {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"},
- {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"},
- {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"},
- {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"},
- {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"},
- {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"},
- {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"},
- {file = "lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"},
- {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"},
- {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"},
- {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"},
- {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"},
- {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"},
- {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"},
- {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"},
- {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"},
- {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"},
- {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"},
- {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"},
- {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"},
- {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"},
- {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"},
- {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"},
- {file = "lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"},
- {file = "lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"},
- {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"},
- {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"},
- {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"},
- {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"},
- {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"},
- {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"},
- {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"},
- {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"},
- {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"},
- {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"},
- {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"},
- {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"},
- {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"},
- {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"},
- {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"},
- {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"},
- {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"},
- {file = "lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"},
- {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"},
- {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"},
- {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"},
- {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"},
- {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"},
- {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"},
- {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"},
- {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"},
- {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"},
- {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"},
- {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"},
- {file = "lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"},
- {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"},
- {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"},
- {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"},
- {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"},
- {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"},
- {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"},
- {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"},
- {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"},
- {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"},
- {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"},
- {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"},
- {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"},
-]
-
-[package.extras]
-cssselect = ["cssselect (>=0.7)"]
-html-clean = ["lxml-html-clean"]
-html5 = ["html5lib"]
-htmlsoup = ["BeautifulSoup4"]
-source = ["Cython (>=3.0.11)"]
-
-[[package]]
-name = "markdown-it-py"
-version = "3.0.0"
-description = "Python port of markdown-it. Markdown parsing, done right!"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
- {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
-]
-
-[package.dependencies]
-mdurl = ">=0.1,<1.0"
-
-[package.extras]
-benchmarking = ["psutil", "pytest", "pytest-benchmark"]
-code-style = ["pre-commit (>=3.0,<4.0)"]
-compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
-linkify = ["linkify-it-py (>=1,<3)"]
-plugins = ["mdit-py-plugins"]
-profiling = ["gprof2dot"]
-rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
-testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
-
-[[package]]
-name = "markupsafe"
-version = "3.0.1"
-description = "Safely add untrusted strings to HTML/XML markup."
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-win32.whl", hash = "sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97"},
- {file = "MarkupSafe-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-win32.whl", hash = "sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635"},
- {file = "MarkupSafe-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-win32.whl", hash = "sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa"},
- {file = "MarkupSafe-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-win32.whl", hash = "sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c"},
- {file = "MarkupSafe-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-win32.whl", hash = "sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b"},
- {file = "MarkupSafe-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-win32.whl", hash = "sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8"},
- {file = "MarkupSafe-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b"},
- {file = "markupsafe-3.0.1.tar.gz", hash = "sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344"},
-]
-
-[[package]]
-name = "matplotlib"
-version = "3.9.2"
-description = "Python plotting package"
-optional = true
-python-versions = ">=3.9"
-files = [
- {file = "matplotlib-3.9.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9d78bbc0cbc891ad55b4f39a48c22182e9bdaea7fc0e5dbd364f49f729ca1bbb"},
- {file = "matplotlib-3.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c375cc72229614632c87355366bdf2570c2dac01ac66b8ad048d2dabadf2d0d4"},
- {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d94ff717eb2bd0b58fe66380bd8b14ac35f48a98e7c6765117fe67fb7684e64"},
- {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab68d50c06938ef28681073327795c5db99bb4666214d2d5f880ed11aeaded66"},
- {file = "matplotlib-3.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:65aacf95b62272d568044531e41de26285d54aec8cb859031f511f84bd8b495a"},
- {file = "matplotlib-3.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:3fd595f34aa8a55b7fc8bf9ebea8aa665a84c82d275190a61118d33fbc82ccae"},
- {file = "matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772"},
- {file = "matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41"},
- {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f"},
- {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447"},
- {file = "matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e"},
- {file = "matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7"},
- {file = "matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9"},
- {file = "matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d"},
- {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7"},
- {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c"},
- {file = "matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e"},
- {file = "matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3"},
- {file = "matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9"},
- {file = "matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa"},
- {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b"},
- {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413"},
- {file = "matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b"},
- {file = "matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49"},
- {file = "matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03"},
- {file = "matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30"},
- {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51"},
- {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c"},
- {file = "matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e"},
- {file = "matplotlib-3.9.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cef2a73d06601437be399908cf13aee74e86932a5ccc6ccdf173408ebc5f6bb2"},
- {file = "matplotlib-3.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0830e188029c14e891fadd99702fd90d317df294c3298aad682739c5533721a"},
- {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ba9c1299c920964e8d3857ba27173b4dbb51ca4bab47ffc2c2ba0eb5e2cbc5"},
- {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd93b91ab47a3616b4d3c42b52f8363b88ca021e340804c6ab2536344fad9ca"},
- {file = "matplotlib-3.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6d1ce5ed2aefcdce11904fc5bbea7d9c21fff3d5f543841edf3dea84451a09ea"},
- {file = "matplotlib-3.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:b2696efdc08648536efd4e1601b5fd491fd47f4db97a5fbfd175549a7365c1b2"},
- {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d52a3b618cb1cbb769ce2ee1dcdb333c3ab6e823944e9a2d36e37253815f9556"},
- {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:039082812cacd6c6bec8e17a9c1e6baca230d4116d522e81e1f63a74d01d2e21"},
- {file = "matplotlib-3.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6758baae2ed64f2331d4fd19be38b7b4eae3ecec210049a26b6a4f3ae1c85dcc"},
- {file = "matplotlib-3.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:050598c2b29e0b9832cde72bcf97627bf00262adbc4a54e2b856426bb2ef0697"},
- {file = "matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92"},
-]
-
-[package.dependencies]
-contourpy = ">=1.0.1"
-cycler = ">=0.10"
-fonttools = ">=4.22.0"
-kiwisolver = ">=1.3.1"
-numpy = ">=1.23"
-packaging = ">=20.0"
-pillow = ">=8"
-pyparsing = ">=2.3.1"
-python-dateutil = ">=2.7"
-
-[package.extras]
-dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6)", "setuptools (>=64)", "setuptools_scm (>=7)"]
-
-[[package]]
-name = "matplotlib-inline"
-version = "0.1.7"
-description = "Inline Matplotlib backend for Jupyter"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"},
- {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"},
-]
-
-[package.dependencies]
-traitlets = "*"
-
-[[package]]
-name = "mdurl"
-version = "0.1.2"
-description = "Markdown URL utilities"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
- {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
-]
-
-[[package]]
-name = "meshio"
-version = "5.3.5"
-description = "I/O for many mesh formats"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "meshio-5.3.5-py3-none-any.whl", hash = "sha256:0736c6e34ecc768f62f2cde5d8233a3529512a9399b25c68ea2ca0d5900cdc10"},
- {file = "meshio-5.3.5.tar.gz", hash = "sha256:f21f01abd9f29ba06ea119304b3d39e610421cfe93b9dd23362834919f87586d"},
-]
-
-[package.dependencies]
-numpy = ">=1.20.0"
-rich = "*"
-
-[package.extras]
-all = ["h5py", "netCDF4"]
-
-[[package]]
-name = "mistune"
-version = "3.0.2"
-description = "A sane and fast Markdown parser with useful plugins and renderers"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"},
- {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"},
-]
-
-[[package]]
-name = "mpmath"
-version = "1.3.0"
-description = "Python library for arbitrary-precision floating-point arithmetic"
-optional = false
-python-versions = "*"
-files = [
- {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
- {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"},
-]
-
-[package.extras]
-develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"]
-docs = ["sphinx"]
-gmpy = ["gmpy2 (>=2.1.0a4)"]
-tests = ["pytest (>=4.6)"]
-
-[[package]]
-name = "mujoco"
-version = "2.3.7"
-description = "MuJoCo Physics Simulator"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "mujoco-2.3.7-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:e8714a5ff6a1561b364b7b4648d4c0c8d13e751874cf7401c309b9d23fa9598b"},
- {file = "mujoco-2.3.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a934315f858a4e0c4b90a682fde519471cfdd7baa64435179da8cd20d4ae3f99"},
- {file = "mujoco-2.3.7-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:36513024330f88b5f9a43558efef5692b33599bffd5141029b690a27918ffcbe"},
- {file = "mujoco-2.3.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d4eede8ba8210fbd3d3cd1dbf69e24dd1541aa74c5af5b8adbbbf65504b6dba"},
- {file = "mujoco-2.3.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab85fafc9d5a091c712947573b7e694512d283876bf7f33ae3f8daad3a20c0db"},
- {file = "mujoco-2.3.7-cp310-cp310-win_amd64.whl", hash = "sha256:f8b7e13fef8c813d91b78f975ed0815157692777907ffa4b4be53a4edb75019b"},
- {file = "mujoco-2.3.7-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:779520216f72a8e370e3f0cdd71b45c3b7384c63331a3189194c930a3e7cff5c"},
- {file = "mujoco-2.3.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9d4018053879016282d27ab7a91e292c72d44efb5a88553feacfe5b843dde103"},
- {file = "mujoco-2.3.7-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:3149b16b8122ee62642474bfd2871064e8edc40235471cf5d84be3569afc0312"},
- {file = "mujoco-2.3.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c08660a8d52ef3efde76095f0991e807703a950c1e882d2bcd984b9a846626f7"},
- {file = "mujoco-2.3.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:426af8965f8636d94a0f75740c3024a62b3e585020ee817ef5208ec844a1ad94"},
- {file = "mujoco-2.3.7-cp311-cp311-win_amd64.whl", hash = "sha256:215415a8e98a4b50625beae859079d5e0810b2039e50420f0ba81763c34abb59"},
- {file = "mujoco-2.3.7-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:8b78d14f4c60cea3c58e046bd4de453fb5b9b33aca6a25fc91d39a53f3a5342a"},
- {file = "mujoco-2.3.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5c6f5a51d6f537a4bf294cf73816f3a6384573f8f10a5452b044df2771412a96"},
- {file = "mujoco-2.3.7-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:ea8911e6047f92d7d775701f37e4c093971b6def3160f01d0b6926e29a7e962e"},
- {file = "mujoco-2.3.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7473a3de4dd1a8762d569ffb139196b4c5e7eca27d256df97b6cd4c66d2a09b2"},
- {file = "mujoco-2.3.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7e2d8f93d2495ec74efec84e5118ecc6e1d85157a844789c73c9ac9a4e28e"},
- {file = "mujoco-2.3.7-cp38-cp38-win_amd64.whl", hash = "sha256:720bc228a2023b3b0ed6af78f5b0f8ea36867be321d473321555c57dbf6e4e5b"},
- {file = "mujoco-2.3.7-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:855e79686366442aa410246043b44f7d842d3900d68fe7e37feb42147db9d707"},
- {file = "mujoco-2.3.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:98947f4a742d34d36f3c3f83e9167025bb0414bbaa4bd859b0673bdab9959963"},
- {file = "mujoco-2.3.7-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:d42818f2ee5d1632dbce31d136ed5ff868db54b04e4e9aca0c5a3ac329f8a90f"},
- {file = "mujoco-2.3.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9237e1ba14bced9449c31199e6d5be49547f3a4c99bc83b196af7ca45fd73b83"},
- {file = "mujoco-2.3.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b728ea638245b150e2650c5433e6952e0ed3798c63e47e264574270caea2a3"},
- {file = "mujoco-2.3.7-cp39-cp39-win_amd64.whl", hash = "sha256:9c721a5042b99d948d5f0296a534bcce3f142c777c4d7642f503a539513f3912"},
- {file = "mujoco-2.3.7.tar.gz", hash = "sha256:422041f1ce37c6d151fbced1048df626837e94fe3cd9f813585907046336a7d0"},
-]
-
-[package.dependencies]
-absl-py = "*"
-glfw = "*"
-numpy = "*"
-pyopengl = "*"
-
-[[package]]
-name = "multidict"
-version = "6.1.0"
-description = "multidict implementation"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"},
- {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"},
- {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"},
- {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"},
- {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"},
- {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"},
- {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"},
- {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"},
- {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"},
- {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"},
- {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"},
- {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"},
- {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"},
- {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"},
- {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"},
- {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"},
- {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"},
- {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"},
- {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"},
- {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"},
- {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"},
- {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"},
- {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"},
- {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"},
- {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"},
- {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"},
- {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"},
- {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"},
- {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"},
- {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"},
- {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"},
- {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"},
- {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"},
- {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"},
- {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"},
- {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"},
- {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"},
- {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"},
- {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"},
- {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"},
- {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"},
- {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"},
- {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"},
- {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"},
- {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"},
- {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"},
- {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"},
- {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"},
- {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"},
- {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"},
- {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"},
- {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"},
- {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"},
- {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"},
- {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"},
- {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"},
- {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"},
- {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"},
- {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"},
- {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"},
- {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"},
- {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"},
- {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"},
- {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"},
- {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"},
- {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"},
- {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"},
- {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"},
- {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"},
- {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"},
- {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"},
- {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"},
- {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"},
- {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"},
- {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"},
- {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"},
- {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"},
- {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"},
- {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"},
- {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"},
- {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"},
- {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"},
- {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"},
- {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"},
- {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"},
- {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"},
- {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"},
- {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"},
- {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"},
- {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"},
- {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"},
- {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"},
-]
-
-[package.dependencies]
-typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""}
-
-[[package]]
-name = "multiprocess"
-version = "0.70.16"
-description = "better multiprocessing and multithreading in Python"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "multiprocess-0.70.16-pp310-pypy310_pp73-macosx_10_13_x86_64.whl", hash = "sha256:476887be10e2f59ff183c006af746cb6f1fd0eadcfd4ef49e605cbe2659920ee"},
- {file = "multiprocess-0.70.16-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d951bed82c8f73929ac82c61f01a7b5ce8f3e5ef40f5b52553b4f547ce2b08ec"},
- {file = "multiprocess-0.70.16-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37b55f71c07e2d741374998c043b9520b626a8dddc8b3129222ca4f1a06ef67a"},
- {file = "multiprocess-0.70.16-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba8c31889abf4511c7308a8c52bb4a30b9d590e7f58523302ba00237702ca054"},
- {file = "multiprocess-0.70.16-pp39-pypy39_pp73-macosx_10_13_x86_64.whl", hash = "sha256:0dfd078c306e08d46d7a8d06fb120313d87aa43af60d66da43ffff40b44d2f41"},
- {file = "multiprocess-0.70.16-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e7b9d0f307cd9bd50851afaac0dba2cb6c44449efff697df7c7645f7d3f2be3a"},
- {file = "multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02"},
- {file = "multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a"},
- {file = "multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e"},
- {file = "multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435"},
- {file = "multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3"},
- {file = "multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1"},
-]
-
-[package.dependencies]
-dill = ">=0.3.8"
-
-[[package]]
-name = "nbclient"
-version = "0.10.0"
-description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor."
-optional = true
-python-versions = ">=3.8.0"
-files = [
- {file = "nbclient-0.10.0-py3-none-any.whl", hash = "sha256:f13e3529332a1f1f81d82a53210322476a168bb7090a0289c795fe9cc11c9d3f"},
- {file = "nbclient-0.10.0.tar.gz", hash = "sha256:4b3f1b7dba531e498449c4db4f53da339c91d449dc11e9af3a43b4eb5c5abb09"},
-]
-
-[package.dependencies]
-jupyter-client = ">=6.1.12"
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-nbformat = ">=5.1"
-traitlets = ">=5.4"
-
-[package.extras]
-dev = ["pre-commit"]
-docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling"]
-test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"]
-
-[[package]]
-name = "nbconvert"
-version = "7.16.4"
-description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "nbconvert-7.16.4-py3-none-any.whl", hash = "sha256:05873c620fe520b6322bf8a5ad562692343fe3452abda5765c7a34b7d1aa3eb3"},
- {file = "nbconvert-7.16.4.tar.gz", hash = "sha256:86ca91ba266b0a448dc96fa6c5b9d98affabde2867b363258703536807f9f7f4"},
-]
-
-[package.dependencies]
-beautifulsoup4 = "*"
-bleach = "!=5.0.0"
-defusedxml = "*"
-jinja2 = ">=3.0"
-jupyter-core = ">=4.7"
-jupyterlab-pygments = "*"
-markupsafe = ">=2.0"
-mistune = ">=2.0.3,<4"
-nbclient = ">=0.5.0"
-nbformat = ">=5.7"
-packaging = "*"
-pandocfilters = ">=1.4.1"
-pygments = ">=2.4.1"
-tinycss2 = "*"
-traitlets = ">=5.1"
-
-[package.extras]
-all = ["flaky", "ipykernel", "ipython", "ipywidgets (>=7.5)", "myst-parser", "nbsphinx (>=0.2.12)", "playwright", "pydata-sphinx-theme", "pyqtwebengine (>=5.15)", "pytest (>=7)", "sphinx (==5.0.2)", "sphinxcontrib-spelling", "tornado (>=6.1)"]
-docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"]
-qtpdf = ["pyqtwebengine (>=5.15)"]
-qtpng = ["pyqtwebengine (>=5.15)"]
-serve = ["tornado (>=6.1)"]
-test = ["flaky", "ipykernel", "ipywidgets (>=7.5)", "pytest (>=7)"]
-webpdf = ["playwright"]
-
-[[package]]
-name = "nbformat"
-version = "5.10.4"
-description = "The Jupyter Notebook format"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"},
- {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"},
-]
-
-[package.dependencies]
-fastjsonschema = ">=2.15"
-jsonschema = ">=2.6"
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-traitlets = ">=5.1"
-
-[package.extras]
-docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
-test = ["pep440", "pre-commit", "pytest", "testpath"]
-
-[[package]]
-name = "nest-asyncio"
-version = "1.6.0"
-description = "Patch asyncio to allow nested event loops"
-optional = true
-python-versions = ">=3.5"
-files = [
- {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"},
- {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"},
-]
-
-[[package]]
-name = "networkx"
-version = "3.4"
-description = "Python package for creating and manipulating graphs and networks"
-optional = false
-python-versions = ">=3.10"
-files = [
- {file = "networkx-3.4-py3-none-any.whl", hash = "sha256:46dad0ec74a825a968e2b36c37ef5b91faa3868f017b2283d9cbff33112222ce"},
- {file = "networkx-3.4.tar.gz", hash = "sha256:1269b90f8f0d3a4095f016f49650f35ac169729f49b69d0572b2bb142748162b"},
-]
-
-[package.extras]
-default = ["matplotlib (>=3.7)", "numpy (>=1.24)", "pandas (>=2.0)", "scipy (>=1.10,!=1.11.0,!=1.11.1)"]
-developer = ["changelist (==0.5)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"]
-doc = ["intersphinx-registry", "myst-nb (>=1.1)", "numpydoc (>=1.8.0)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.15)", "sphinx (>=7.3)", "sphinx-gallery (>=0.16)", "texext (>=0.6.7)"]
-example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy (>=0.7.2)", "osmnx (>=1.9)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"]
-extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"]
-test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"]
-
-[[package]]
-name = "nodeenv"
-version = "1.9.1"
-description = "Node.js virtual environment builder"
-optional = true
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
-files = [
- {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"},
- {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"},
-]
-
-[[package]]
-name = "nose"
-version = "1.3.7"
-description = "nose extends unittest to make testing easier"
-optional = true
-python-versions = "*"
-files = [
- {file = "nose-1.3.7-py2-none-any.whl", hash = "sha256:dadcddc0aefbf99eea214e0f1232b94f2fa9bd98fa8353711dacb112bfcbbb2a"},
- {file = "nose-1.3.7-py3-none-any.whl", hash = "sha256:9ff7c6cc443f8c51994b34a667bbcf45afd6d945be7477b52e97516fd17c53ac"},
- {file = "nose-1.3.7.tar.gz", hash = "sha256:f1bffef9cbc82628f6e7d7b40d7e255aefaa1adb6a1b1d26c69a8b79e6208a98"},
-]
-
-[[package]]
-name = "notebook"
-version = "7.2.2"
-description = "Jupyter Notebook - A web-based notebook environment for interactive computing"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "notebook-7.2.2-py3-none-any.whl", hash = "sha256:c89264081f671bc02eec0ed470a627ed791b9156cad9285226b31611d3e9fe1c"},
- {file = "notebook-7.2.2.tar.gz", hash = "sha256:2ef07d4220421623ad3fe88118d687bc0450055570cdd160814a59cf3a1c516e"},
-]
-
-[package.dependencies]
-jupyter-server = ">=2.4.0,<3"
-jupyterlab = ">=4.2.0,<4.3"
-jupyterlab-server = ">=2.27.1,<3"
-notebook-shim = ">=0.2,<0.3"
-tornado = ">=6.2.0"
-
-[package.extras]
-dev = ["hatch", "pre-commit"]
-docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
-test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.27.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"]
-
-[[package]]
-name = "notebook-shim"
-version = "0.2.4"
-description = "A shim layer for notebook traits and config"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef"},
- {file = "notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb"},
-]
-
-[package.dependencies]
-jupyter-server = ">=1.8,<3"
-
-[package.extras]
-test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"]
-
-[[package]]
-name = "numba"
-version = "0.60.0"
-description = "compiling Python code using LLVM"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "numba-0.60.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d761de835cd38fb400d2c26bb103a2726f548dc30368853121d66201672e651"},
- {file = "numba-0.60.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:159e618ef213fba758837f9837fb402bbe65326e60ba0633dbe6c7f274d42c1b"},
- {file = "numba-0.60.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1527dc578b95c7c4ff248792ec33d097ba6bef9eda466c948b68dfc995c25781"},
- {file = "numba-0.60.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe0b28abb8d70f8160798f4de9d486143200f34458d34c4a214114e445d7124e"},
- {file = "numba-0.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:19407ced081d7e2e4b8d8c36aa57b7452e0283871c296e12d798852bc7d7f198"},
- {file = "numba-0.60.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a17b70fc9e380ee29c42717e8cc0bfaa5556c416d94f9aa96ba13acb41bdece8"},
- {file = "numba-0.60.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fb02b344a2a80efa6f677aa5c40cd5dd452e1b35f8d1c2af0dfd9ada9978e4b"},
- {file = "numba-0.60.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f4fde652ea604ea3c86508a3fb31556a6157b2c76c8b51b1d45eb40c8598703"},
- {file = "numba-0.60.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4142d7ac0210cc86432b818338a2bc368dc773a2f5cf1e32ff7c5b378bd63ee8"},
- {file = "numba-0.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:cac02c041e9b5bc8cf8f2034ff6f0dbafccd1ae9590dc146b3a02a45e53af4e2"},
- {file = "numba-0.60.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7da4098db31182fc5ffe4bc42c6f24cd7d1cb8a14b59fd755bfee32e34b8404"},
- {file = "numba-0.60.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38d6ea4c1f56417076ecf8fc327c831ae793282e0ff51080c5094cb726507b1c"},
- {file = "numba-0.60.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:62908d29fb6a3229c242e981ca27e32a6e606cc253fc9e8faeb0e48760de241e"},
- {file = "numba-0.60.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0ebaa91538e996f708f1ab30ef4d3ddc344b64b5227b67a57aa74f401bb68b9d"},
- {file = "numba-0.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:f75262e8fe7fa96db1dca93d53a194a38c46da28b112b8a4aca168f0df860347"},
- {file = "numba-0.60.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:01ef4cd7d83abe087d644eaa3d95831b777aa21d441a23703d649e06b8e06b74"},
- {file = "numba-0.60.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:819a3dfd4630d95fd574036f99e47212a1af41cbcb019bf8afac63ff56834449"},
- {file = "numba-0.60.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b983bd6ad82fe868493012487f34eae8bf7dd94654951404114f23c3466d34b"},
- {file = "numba-0.60.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c151748cd269ddeab66334bd754817ffc0cabd9433acb0f551697e5151917d25"},
- {file = "numba-0.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:3031547a015710140e8c87226b4cfe927cac199835e5bf7d4fe5cb64e814e3ab"},
- {file = "numba-0.60.0.tar.gz", hash = "sha256:5df6158e5584eece5fc83294b949fd30b9f1125df7708862205217e068aabf16"},
-]
-
-[package.dependencies]
-llvmlite = "==0.43.*"
-numpy = ">=1.22,<2.1"
-
-[[package]]
-name = "numcodecs"
-version = "0.13.1"
-description = "A Python package providing buffer compression and transformation codecs for use in data storage and communication applications."
-optional = false
-python-versions = ">=3.10"
-files = [
- {file = "numcodecs-0.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:96add4f783c5ce57cc7e650b6cac79dd101daf887c479a00a29bc1487ced180b"},
- {file = "numcodecs-0.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:237b7171609e868a20fd313748494444458ccd696062f67e198f7f8f52000c15"},
- {file = "numcodecs-0.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96e42f73c31b8c24259c5fac6adba0c3ebf95536e37749dc6c62ade2989dca28"},
- {file = "numcodecs-0.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:eda7d7823c9282e65234731fd6bd3986b1f9e035755f7fed248d7d366bb291ab"},
- {file = "numcodecs-0.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2eda97dd2f90add98df6d295f2c6ae846043396e3d51a739ca5db6c03b5eb666"},
- {file = "numcodecs-0.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2a86f5367af9168e30f99727ff03b27d849c31ad4522060dde0bce2923b3a8bc"},
- {file = "numcodecs-0.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233bc7f26abce24d57e44ea8ebeb5cd17084690b4e7409dd470fdb75528d615f"},
- {file = "numcodecs-0.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:796b3e6740107e4fa624cc636248a1580138b3f1c579160f260f76ff13a4261b"},
- {file = "numcodecs-0.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5195bea384a6428f8afcece793860b1ab0ae28143c853f0b2b20d55a8947c917"},
- {file = "numcodecs-0.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3501a848adaddce98a71a262fee15cd3618312692aa419da77acd18af4a6a3f6"},
- {file = "numcodecs-0.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2230484e6102e5fa3cc1a5dd37ca1f92dfbd183d91662074d6f7574e3e8f53"},
- {file = "numcodecs-0.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:e5db4824ebd5389ea30e54bc8aeccb82d514d28b6b68da6c536b8fa4596f4bca"},
- {file = "numcodecs-0.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7a60d75179fd6692e301ddfb3b266d51eb598606dcae7b9fc57f986e8d65cb43"},
- {file = "numcodecs-0.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3f593c7506b0ab248961a3b13cb148cc6e8355662ff124ac591822310bc55ecf"},
- {file = "numcodecs-0.13.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80d3071465f03522e776a31045ddf2cfee7f52df468b977ed3afdd7fe5869701"},
- {file = "numcodecs-0.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:90d3065ae74c9342048ae0046006f99dcb1388b7288da5a19b3bddf9c30c3176"},
- {file = "numcodecs-0.13.1.tar.gz", hash = "sha256:a3cf37881df0898f3a9c0d4477df88133fe85185bffe57ba31bcc2fa207709bc"},
-]
-
-[package.dependencies]
-numpy = ">=1.7"
-
-[package.extras]
-docs = ["mock", "numpydoc", "pydata-sphinx-theme", "sphinx", "sphinx-issues"]
-msgpack = ["msgpack"]
-pcodec = ["pcodec (>=0.2.0)"]
-test = ["coverage", "pytest", "pytest-cov"]
-test-extras = ["importlib-metadata"]
-zfpy = ["numpy (<2.0.0)", "zfpy (>=1.0.0)"]
-
-[[package]]
-name = "numpy"
-version = "1.26.4"
-description = "Fundamental package for array computing in Python"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
- {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
- {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"},
- {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"},
- {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"},
- {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"},
- {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"},
- {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"},
- {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"},
- {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"},
- {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"},
- {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"},
- {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"},
- {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"},
- {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"},
- {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"},
- {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"},
- {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"},
- {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"},
- {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"},
- {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"},
- {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"},
- {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"},
- {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"},
- {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"},
- {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"},
- {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"},
- {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"},
- {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"},
- {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"},
- {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"},
- {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"},
- {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"},
- {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"},
- {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"},
- {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"},
-]
-
-[[package]]
-name = "numpy-stl"
-version = "3.1.2"
-description = "Library to make reading, writing and modifying both binary and ascii STL files easy."
-optional = true
-python-versions = ">3.6.0"
-files = [
- {file = "numpy_stl-3.1.2-py3-none-any.whl", hash = "sha256:a55288340c837378bf44753a1c595c6823312995acda97f27ed04db4ff1d25f3"},
- {file = "numpy_stl-3.1.2.tar.gz", hash = "sha256:72b46950dfa3642df1c7b873cfa78a548533724b907478c567db42fdf57ee3d2"},
-]
-
-[package.dependencies]
-numpy = "*"
-python-utils = ">=3.4.5"
-
-[[package]]
-name = "nvidia-cublas-cu12"
-version = "12.1.3.1"
-description = "CUBLAS native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"},
- {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"},
-]
-
-[[package]]
-name = "nvidia-cuda-cupti-cu12"
-version = "12.1.105"
-description = "CUDA profiling tools runtime libs."
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"},
- {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"},
-]
-
-[[package]]
-name = "nvidia-cuda-nvrtc-cu12"
-version = "12.1.105"
-description = "NVRTC native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"},
- {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"},
-]
-
-[[package]]
-name = "nvidia-cuda-runtime-cu12"
-version = "12.1.105"
-description = "CUDA Runtime native Libraries"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"},
- {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"},
-]
-
-[[package]]
-name = "nvidia-cudnn-cu12"
-version = "9.1.0.70"
-description = "cuDNN runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f"},
- {file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-win_amd64.whl", hash = "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a"},
-]
-
-[package.dependencies]
-nvidia-cublas-cu12 = "*"
-
-[[package]]
-name = "nvidia-cufft-cu12"
-version = "11.0.2.54"
-description = "CUFFT native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"},
- {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"},
-]
-
-[[package]]
-name = "nvidia-curand-cu12"
-version = "10.3.2.106"
-description = "CURAND native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"},
- {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"},
-]
-
-[[package]]
-name = "nvidia-cusolver-cu12"
-version = "11.4.5.107"
-description = "CUDA solver native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"},
- {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"},
-]
-
-[package.dependencies]
-nvidia-cublas-cu12 = "*"
-nvidia-cusparse-cu12 = "*"
-nvidia-nvjitlink-cu12 = "*"
-
-[[package]]
-name = "nvidia-cusparse-cu12"
-version = "12.1.0.106"
-description = "CUSPARSE native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"},
- {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"},
-]
-
-[package.dependencies]
-nvidia-nvjitlink-cu12 = "*"
-
-[[package]]
-name = "nvidia-nccl-cu12"
-version = "2.20.5"
-description = "NVIDIA Collective Communication Library (NCCL) Runtime"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"},
- {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"},
-]
-
-[[package]]
-name = "nvidia-nvjitlink-cu12"
-version = "12.6.77"
-description = "Nvidia JIT LTO Library"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:3bf10d85bb1801e9c894c6e197e44dd137d2a0a9e43f8450e9ad13f2df0dd52d"},
- {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9ae346d16203ae4ea513be416495167a0101d33d2d14935aa9c1829a3fb45142"},
- {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:410718cd44962bed862a31dd0318620f6f9a8b28a6291967bcfcb446a6516771"},
-]
-
-[[package]]
-name = "nvidia-nvtx-cu12"
-version = "12.1.105"
-description = "NVIDIA Tools Extension"
-optional = false
-python-versions = ">=3"
-files = [
- {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"},
- {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"},
-]
-
-[[package]]
-name = "omegaconf"
-version = "2.3.0"
-description = "A flexible configuration library"
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b"},
- {file = "omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7"},
-]
-
-[package.dependencies]
-antlr4-python3-runtime = "==4.9.*"
-PyYAML = ">=5.1.0"
-
-[[package]]
-name = "open3d"
-version = "0.18.0"
-description = "Open3D: A Modern Library for 3D Data Processing."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "open3d-0.18.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:48ee627a142a5453c4a2869b529310acb6f6b2507989cb9199c56e75796c575e"},
- {file = "open3d-0.18.0-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:9f3df5e8e8fe514b8285d05e43a4a3d57243d42d5c1dc9212adf8f18b6ab59b4"},
- {file = "open3d-0.18.0-cp310-cp310-manylinux_2_27_aarch64.whl", hash = "sha256:b9c8c8059cb92cd8b73c287385eeddf46195f2609ac7052302d6ac844a373dbf"},
- {file = "open3d-0.18.0-cp310-cp310-manylinux_2_27_x86_64.whl", hash = "sha256:f649d5d58090f73a337895fb0022c7b05c00f47f704b5722b103cceba04cc870"},
- {file = "open3d-0.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:48cdf2af3051320140d198f5d3ea3a85eeb3355e7a989a835b611b16589b9646"},
- {file = "open3d-0.18.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:b35a68b9fef3e963266db3bb15fbfef20e05787bc61192f61725fde5215f3560"},
- {file = "open3d-0.18.0-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:2182b818dcd3290dd2ddb0021ad0453bfda99098c931d5b2fc636a341cb3ca70"},
- {file = "open3d-0.18.0-cp311-cp311-manylinux_2_27_aarch64.whl", hash = "sha256:882f1e5039a3c1c5ec05183eb650537fd7431238b7ccb2b742ca5479f02f705b"},
- {file = "open3d-0.18.0-cp311-cp311-manylinux_2_27_x86_64.whl", hash = "sha256:8e3d1d1900a8f4d956f6819c246c78081725b9b0888f8549d2a7a49c8daa1303"},
- {file = "open3d-0.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:2da5da6c9eb9227baee6fe98baa992233aca36b83ec9e7d4093c77e762db60e6"},
- {file = "open3d-0.18.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:877e67237f2a97f8219870108eecf1ff447a81b0fcf1d2eacea246c9619fc55c"},
- {file = "open3d-0.18.0-cp38-cp38-macosx_13_0_arm64.whl", hash = "sha256:0ec03fcd48a939ec105896e0d02a9d006e8328c60491a0647b9a4fe5d9e4117d"},
- {file = "open3d-0.18.0-cp38-cp38-manylinux_2_27_aarch64.whl", hash = "sha256:477ed692bafd0ed591676d78bcb898bb2b684dcaa2886befe29e1b19d38a7c6d"},
- {file = "open3d-0.18.0-cp38-cp38-manylinux_2_27_x86_64.whl", hash = "sha256:46b9c1b900716771827b78006cfd18489b5327eabda8cd3d01e028b8173f4301"},
- {file = "open3d-0.18.0-cp38-cp38-win_amd64.whl", hash = "sha256:d745affd0c7c765ed30ae9010abc4cfa80980b2c9f39a4f8678e8a9ef41ce089"},
- {file = "open3d-0.18.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:cce30304dfde3b9c0fbfca21687cf0e5280bcbabe2920d2c804ab352bfe610a5"},
- {file = "open3d-0.18.0-cp39-cp39-macosx_13_0_universal2.whl", hash = "sha256:ba5e07ca9a3ec6c70393bd2c5c707455a2e9c54209ccccca15ecf03834efd353"},
- {file = "open3d-0.18.0-cp39-cp39-manylinux_2_27_aarch64.whl", hash = "sha256:23a3bf135c7e69d4116f54b1ff78f58846245b5e70640b291981cee9e49a53d7"},
- {file = "open3d-0.18.0-cp39-cp39-manylinux_2_27_x86_64.whl", hash = "sha256:7d05fd6eedf75136cfbed24983da30bdfd08a6c4b1f968bf80ab84efc1fac861"},
- {file = "open3d-0.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:41be2d652f1b9feed9efb8775b29368ece0b4328ba6e90278486ff7643c6d480"},
-]
-
-[package.dependencies]
-configargparse = "*"
-dash = ">=2.6.0"
-nbformat = ">=5.7.0"
-numpy = ">=1.18.0"
-werkzeug = ">=2.2.3"
-
-[[package]]
-name = "opencv-contrib-python"
-version = "4.10.0.84"
-description = "Wrapper package for OpenCV python bindings."
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "opencv-contrib-python-4.10.0.84.tar.gz", hash = "sha256:4a3eae0ed9cadf1abe9293a6938a25a540e2fd6d7fc308595caa5896c8b36a0c"},
- {file = "opencv_contrib_python-4.10.0.84-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:ee4b0919026d8c533aeb69b16c6ec4a891a2f6844efaa14121bf68838753209c"},
- {file = "opencv_contrib_python-4.10.0.84-cp37-abi3-macosx_12_0_x86_64.whl", hash = "sha256:dea80d4db73b8acccf9e16b5744bf3654f47b22745074263f0a6c10de26c5ef5"},
- {file = "opencv_contrib_python-4.10.0.84-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:040575b69e4f3aa761676bace4e3d1b8485fbfaf77ef77b266ab6bda5a3b5e9b"},
- {file = "opencv_contrib_python-4.10.0.84-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a261223db41f6e512d76deaf21c8fcfb4fbbcbc2de62ca7f74a05f2c9ee489ef"},
- {file = "opencv_contrib_python-4.10.0.84-cp37-abi3-win32.whl", hash = "sha256:2a36257ec1375d1bec2a62177ea39828ff9804de6831ee39646bdc875c343cec"},
- {file = "opencv_contrib_python-4.10.0.84-cp37-abi3-win_amd64.whl", hash = "sha256:47ec3160dae75f70e099b286d1a2e086d20dac8b06e759f60eaf867e6bdecba7"},
-]
-
-[package.dependencies]
-numpy = [
- {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""},
- {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""},
- {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
- {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
-]
-
-[[package]]
-name = "opencv-python"
-version = "4.10.0.84"
-description = "Wrapper package for OpenCV python bindings."
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "opencv-python-4.10.0.84.tar.gz", hash = "sha256:72d234e4582e9658ffea8e9cae5b63d488ad06994ef12d81dc303b17472f3526"},
- {file = "opencv_python-4.10.0.84-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:fc182f8f4cda51b45f01c64e4cbedfc2f00aff799debebc305d8d0210c43f251"},
- {file = "opencv_python-4.10.0.84-cp37-abi3-macosx_12_0_x86_64.whl", hash = "sha256:71e575744f1d23f79741450254660442785f45a0797212852ee5199ef12eed98"},
- {file = "opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09a332b50488e2dda866a6c5573ee192fe3583239fb26ff2f7f9ceb0bc119ea6"},
- {file = "opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ace140fc6d647fbe1c692bcb2abce768973491222c067c131d80957c595b71f"},
- {file = "opencv_python-4.10.0.84-cp37-abi3-win32.whl", hash = "sha256:2db02bb7e50b703f0a2d50c50ced72e95c574e1e5a0bb35a8a86d0b35c98c236"},
- {file = "opencv_python-4.10.0.84-cp37-abi3-win_amd64.whl", hash = "sha256:32dbbd94c26f611dc5cc6979e6b7aa1f55a64d6b463cc1dcd3c95505a63e48fe"},
-]
-
-[package.dependencies]
-numpy = [
- {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""},
- {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""},
- {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
- {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
-]
-
-[[package]]
-name = "orderly-set"
-version = "5.2.2"
-description = "Orderly set"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "orderly_set-5.2.2-py3-none-any.whl", hash = "sha256:f7a37c95a38c01cdfe41c3ffb62925a318a2286ea0a41790c057fc802aec54da"},
- {file = "orderly_set-5.2.2.tar.gz", hash = "sha256:52a18b86aaf3f5d5a498bbdb27bf3253a4e5c57ab38e5b7a56fa00115cd28448"},
-]
-
-[[package]]
-name = "overrides"
-version = "7.7.0"
-description = "A decorator to automatically detect mismatch when overriding a method."
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"},
- {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"},
-]
-
-[[package]]
-name = "packaging"
-version = "24.1"
-description = "Core utilities for Python packages"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
- {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
-]
-
-[[package]]
-name = "pandas"
-version = "2.2.3"
-description = "Powerful data structures for data analysis, time series, and statistics"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
- {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
- {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"},
- {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"},
- {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"},
- {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"},
- {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"},
- {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"},
- {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"},
- {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"},
- {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"},
- {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"},
- {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"},
- {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"},
- {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"},
- {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"},
- {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"},
- {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"},
- {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"},
- {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"},
- {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"},
- {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"},
- {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"},
- {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"},
- {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"},
- {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"},
- {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"},
- {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"},
- {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"},
- {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"},
- {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"},
- {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"},
- {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"},
- {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"},
- {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"},
- {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"},
- {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"},
- {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"},
- {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"},
- {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"},
- {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"},
- {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"},
-]
-
-[package.dependencies]
-numpy = [
- {version = ">=1.22.4", markers = "python_version < \"3.11\""},
- {version = ">=1.23.2", markers = "python_version == \"3.11\""},
- {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
-]
-python-dateutil = ">=2.8.2"
-pytz = ">=2020.1"
-tzdata = ">=2022.7"
-
-[package.extras]
-all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"]
-aws = ["s3fs (>=2022.11.0)"]
-clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"]
-compression = ["zstandard (>=0.19.0)"]
-computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"]
-consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
-excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"]
-feather = ["pyarrow (>=10.0.1)"]
-fss = ["fsspec (>=2022.11.0)"]
-gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"]
-hdf5 = ["tables (>=3.8.0)"]
-html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"]
-mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"]
-output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"]
-parquet = ["pyarrow (>=10.0.1)"]
-performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"]
-plot = ["matplotlib (>=3.6.3)"]
-postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"]
-pyarrow = ["pyarrow (>=10.0.1)"]
-spss = ["pyreadstat (>=1.2.0)"]
-sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"]
-test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
-xml = ["lxml (>=4.9.2)"]
-
-[[package]]
-name = "pandocfilters"
-version = "1.5.1"
-description = "Utilities for writing pandoc filters in python"
-optional = true
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-files = [
- {file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"},
- {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"},
-]
-
-[[package]]
-name = "parso"
-version = "0.8.4"
-description = "A Python Parser"
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"},
- {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"},
-]
-
-[package.extras]
-qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
-testing = ["docopt", "pytest"]
-
-[[package]]
-name = "pathlib"
-version = "1.0.1"
-description = "Object-oriented filesystem paths"
-optional = true
-python-versions = "*"
-files = [
- {file = "pathlib-1.0.1-py3-none-any.whl", hash = "sha256:f35f95ab8b0f59e6d354090350b44a80a80635d22efdedfa84c7ad1cf0a74147"},
- {file = "pathlib-1.0.1.tar.gz", hash = "sha256:6940718dfc3eff4258203ad5021090933e5c04707d5ca8cc9e73c94a7894ea9f"},
-]
-
-[[package]]
-name = "pettingzoo"
-version = "1.24.3"
-description = "Gymnasium for multi-agent reinforcement learning."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pettingzoo-1.24.3-py3-none-any.whl", hash = "sha256:23ed90517d2e8a7098bdaf5e31234b3a7f7b73ca578d70d1ca7b9d0cb0e37982"},
- {file = "pettingzoo-1.24.3.tar.gz", hash = "sha256:91f9094f18e06fb74b98f4099cd22e8ae4396125e51719d50b30c9f1c7ab07e6"},
-]
-
-[package.dependencies]
-gymnasium = ">=0.28.0"
-numpy = ">=1.21.0"
-
-[package.extras]
-all = ["box2d-py (==2.3.5)", "chess (==1.9.4)", "multi-agent-ale-py (==0.1.11)", "pillow (>=8.0.1)", "pygame (==2.3.0)", "pymunk (==6.2.0)", "rlcard (==1.0.5)", "scipy (>=1.4.1)", "shimmy[openspiel] (>=1.2.0)"]
-atari = ["multi-agent-ale-py (==0.1.11)", "pygame (==2.3.0)"]
-butterfly = ["pygame (==2.3.0)", "pymunk (==6.2.0)"]
-classic = ["chess (==1.9.4)", "pygame (==2.3.0)", "rlcard (==1.0.5)", "shimmy[openspiel] (>=1.2.0)"]
-mpe = ["pygame (==2.3.0)"]
-other = ["pillow (>=8.0.1)"]
-sisl = ["box2d-py (==2.3.5)", "pygame (==2.3.0)", "pymunk (==6.2.0)", "scipy (>=1.4.1)"]
-testing = ["AutoROM", "pre-commit", "pynput", "pytest", "pytest-cov", "pytest-markdown-docs", "pytest-xdist"]
-
-[[package]]
-name = "pexpect"
-version = "4.9.0"
-description = "Pexpect allows easy control of interactive console applications."
-optional = true
-python-versions = "*"
-files = [
- {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"},
- {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"},
-]
-
-[package.dependencies]
-ptyprocess = ">=0.5"
-
-[[package]]
-name = "pfzy"
-version = "0.3.4"
-description = "Python port of the fzy fuzzy string matching algorithm"
-optional = false
-python-versions = ">=3.7,<4.0"
-files = [
- {file = "pfzy-0.3.4-py3-none-any.whl", hash = "sha256:5f50d5b2b3207fa72e7ec0ef08372ef652685470974a107d0d4999fc5a903a96"},
- {file = "pfzy-0.3.4.tar.gz", hash = "sha256:717ea765dd10b63618e7298b2d98efd819e0b30cd5905c9707223dceeb94b3f1"},
-]
-
-[package.extras]
-docs = ["Sphinx (>=4.1.2,<5.0.0)", "furo (>=2021.8.17-beta.43,<2022.0.0)", "myst-parser (>=0.15.1,<0.16.0)", "sphinx-autobuild (>=2021.3.14,<2022.0.0)", "sphinx-copybutton (>=0.4.0,<0.5.0)"]
-
-[[package]]
-name = "pillow"
-version = "10.4.0"
-description = "Python Imaging Library (Fork)"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"},
- {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"},
- {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"},
- {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"},
- {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"},
- {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"},
- {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"},
- {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"},
- {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"},
- {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"},
- {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"},
- {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"},
- {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"},
- {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"},
- {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"},
- {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"},
- {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"},
- {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"},
- {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"},
- {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"},
- {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"},
- {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"},
- {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"},
- {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"},
- {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"},
- {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"},
- {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"},
- {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"},
- {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"},
- {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"},
- {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"},
- {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"},
- {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"},
- {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"},
- {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"},
- {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"},
- {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"},
- {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"},
- {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"},
- {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"},
- {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"},
- {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"},
- {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"},
- {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"},
- {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"},
- {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"},
- {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"},
- {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"},
- {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"},
- {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"},
- {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"},
- {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"},
- {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"},
- {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"},
- {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"},
- {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"},
- {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"},
- {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"},
- {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"},
- {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"},
- {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"},
- {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"},
- {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"},
- {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"},
- {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"},
- {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"},
- {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"},
- {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"},
- {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"},
- {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"},
- {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"},
- {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"},
- {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"},
- {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"},
- {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"},
- {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"},
- {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"},
- {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"},
- {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"},
- {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"},
-]
-
-[package.extras]
-docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
-fpx = ["olefile"]
-mic = ["olefile"]
-tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
-typing = ["typing-extensions"]
-xmp = ["defusedxml"]
-
-[[package]]
-name = "pixel-ring"
-version = "0.1.0"
-description = "respeaker series pixel ring library"
-optional = true
-python-versions = "*"
-files = [
- {file = "pixel-ring-0.1.0.tar.gz", hash = "sha256:9480f23b58ccb912321b989d00e9d31f087f7bbcd8d970fca0fb319853d03270"},
- {file = "pixel_ring-0.1.0-py2.py3-none-any.whl", hash = "sha256:c0fa51beb67be81b1f6ab058f651c489d69b47fb884d4361a0cf7594f093885b"},
-]
-
-[package.dependencies]
-pyusb = "*"
-spidev = "*"
-
-[[package]]
-name = "platformdirs"
-version = "4.3.6"
-description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
- {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
-]
-
-[package.extras]
-docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"]
-test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"]
-type = ["mypy (>=1.11.2)"]
-
-[[package]]
-name = "plotly"
-version = "5.24.1"
-description = "An open-source, interactive data visualization library for Python"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "plotly-5.24.1-py3-none-any.whl", hash = "sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089"},
- {file = "plotly-5.24.1.tar.gz", hash = "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae"},
-]
-
-[package.dependencies]
-packaging = "*"
-tenacity = ">=6.2.0"
-
-[[package]]
-name = "pluggy"
-version = "1.5.0"
-description = "plugin and hook calling mechanisms for python"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
- {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
-]
-
-[package.extras]
-dev = ["pre-commit", "tox"]
-testing = ["pytest", "pytest-benchmark"]
-
-[[package]]
-name = "pre-commit"
-version = "4.0.1"
-description = "A framework for managing and maintaining multi-language pre-commit hooks."
-optional = true
-python-versions = ">=3.9"
-files = [
- {file = "pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878"},
- {file = "pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2"},
-]
-
-[package.dependencies]
-cfgv = ">=2.0.0"
-identify = ">=1.0.0"
-nodeenv = ">=0.11.1"
-pyyaml = ">=5.1"
-virtualenv = ">=20.10.0"
-
-[[package]]
-name = "prometheus-client"
-version = "0.21.0"
-description = "Python client for the Prometheus monitoring system."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"},
- {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"},
-]
-
-[package.extras]
-twisted = ["twisted"]
-
-[[package]]
-name = "prompt-toolkit"
-version = "3.0.48"
-description = "Library for building powerful interactive command lines in Python"
-optional = false
-python-versions = ">=3.7.0"
-files = [
- {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"},
- {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"},
-]
-
-[package.dependencies]
-wcwidth = "*"
-
-[[package]]
-name = "propcache"
-version = "0.2.0"
-description = "Accelerated property cache"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"},
- {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"},
- {file = "propcache-0.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33ac8f098df0585c0b53009f039dfd913b38c1d2edafed0cedcc0c32a05aa110"},
- {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e48e8875e6c13909c800fa344cd54cc4b2b0db1d5f911f840458a500fde2c2"},
- {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388f3217649d6d59292b722d940d4d2e1e6a7003259eb835724092a1cca0203a"},
- {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f571aea50ba5623c308aa146eb650eebf7dbe0fd8c5d946e28343cb3b5aad577"},
- {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dfafb44f7bb35c0c06eda6b2ab4bfd58f02729e7c4045e179f9a861b07c9850"},
- {file = "propcache-0.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3ebe9a75be7ab0b7da2464a77bb27febcb4fab46a34f9288f39d74833db7f61"},
- {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2f0d0f976985f85dfb5f3d685697ef769faa6b71993b46b295cdbbd6be8cc37"},
- {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a3dc1a4b165283bd865e8f8cb5f0c64c05001e0718ed06250d8cac9bec115b48"},
- {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e0f07b42d2a50c7dd2d8675d50f7343d998c64008f1da5fef888396b7f84630"},
- {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e63e3e1e0271f374ed489ff5ee73d4b6e7c60710e1f76af5f0e1a6117cd26394"},
- {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:56bb5c98f058a41bb58eead194b4db8c05b088c93d94d5161728515bd52b052b"},
- {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7665f04d0c7f26ff8bb534e1c65068409bf4687aa2534faf7104d7182debb336"},
- {file = "propcache-0.2.0-cp310-cp310-win32.whl", hash = "sha256:7cf18abf9764746b9c8704774d8b06714bcb0a63641518a3a89c7f85cc02c2ad"},
- {file = "propcache-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:cfac69017ef97db2438efb854edf24f5a29fd09a536ff3a992b75990720cdc99"},
- {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:63f13bf09cc3336eb04a837490b8f332e0db41da66995c9fd1ba04552e516354"},
- {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608cce1da6f2672a56b24a015b42db4ac612ee709f3d29f27a00c943d9e851de"},
- {file = "propcache-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:466c219deee4536fbc83c08d09115249db301550625c7fef1c5563a584c9bc87"},
- {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc2db02409338bf36590aa985a461b2c96fce91f8e7e0f14c50c5fcc4f229016"},
- {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6ed8db0a556343d566a5c124ee483ae113acc9a557a807d439bcecc44e7dfbb"},
- {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91997d9cb4a325b60d4e3f20967f8eb08dfcb32b22554d5ef78e6fd1dda743a2"},
- {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c7dde9e533c0a49d802b4f3f218fa9ad0a1ce21f2c2eb80d5216565202acab4"},
- {file = "propcache-0.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffcad6c564fe6b9b8916c1aefbb37a362deebf9394bd2974e9d84232e3e08504"},
- {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:97a58a28bcf63284e8b4d7b460cbee1edaab24634e82059c7b8c09e65284f178"},
- {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:945db8ee295d3af9dbdbb698cce9bbc5c59b5c3fe328bbc4387f59a8a35f998d"},
- {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39e104da444a34830751715f45ef9fc537475ba21b7f1f5b0f4d71a3b60d7fe2"},
- {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c5ecca8f9bab618340c8e848d340baf68bcd8ad90a8ecd7a4524a81c1764b3db"},
- {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c436130cc779806bdf5d5fae0d848713105472b8566b75ff70048c47d3961c5b"},
- {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:191db28dc6dcd29d1a3e063c3be0b40688ed76434622c53a284e5427565bbd9b"},
- {file = "propcache-0.2.0-cp311-cp311-win32.whl", hash = "sha256:5f2564ec89058ee7c7989a7b719115bdfe2a2fb8e7a4543b8d1c0cc4cf6478c1"},
- {file = "propcache-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e2e54267980349b723cff366d1e29b138b9a60fa376664a157a342689553f71"},
- {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ee7606193fb267be4b2e3b32714f2d58cad27217638db98a60f9efb5efeccc2"},
- {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:91ee8fc02ca52e24bcb77b234f22afc03288e1dafbb1f88fe24db308910c4ac7"},
- {file = "propcache-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e900bad2a8456d00a113cad8c13343f3b1f327534e3589acc2219729237a2e8"},
- {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f52a68c21363c45297aca15561812d542f8fc683c85201df0bebe209e349f793"},
- {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e41d67757ff4fbc8ef2af99b338bfb955010444b92929e9e55a6d4dcc3c4f09"},
- {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a64e32f8bd94c105cc27f42d3b658902b5bcc947ece3c8fe7bc1b05982f60e89"},
- {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55346705687dbd7ef0d77883ab4f6fabc48232f587925bdaf95219bae072491e"},
- {file = "propcache-0.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00181262b17e517df2cd85656fcd6b4e70946fe62cd625b9d74ac9977b64d8d9"},
- {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6994984550eaf25dd7fc7bd1b700ff45c894149341725bb4edc67f0ffa94efa4"},
- {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:56295eb1e5f3aecd516d91b00cfd8bf3a13991de5a479df9e27dd569ea23959c"},
- {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:439e76255daa0f8151d3cb325f6dd4a3e93043e6403e6491813bcaaaa8733887"},
- {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f6475a1b2ecb310c98c28d271a30df74f9dd436ee46d09236a6b750a7599ce57"},
- {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3444cdba6628accf384e349014084b1cacd866fbb88433cd9d279d90a54e0b23"},
- {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4a9d9b4d0a9b38d1c391bb4ad24aa65f306c6f01b512e10a8a34a2dc5675d348"},
- {file = "propcache-0.2.0-cp312-cp312-win32.whl", hash = "sha256:69d3a98eebae99a420d4b28756c8ce6ea5a29291baf2dc9ff9414b42676f61d5"},
- {file = "propcache-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ad9c9b99b05f163109466638bd30ada1722abb01bbb85c739c50b6dc11f92dc3"},
- {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecddc221a077a8132cf7c747d5352a15ed763b674c0448d811f408bf803d9ad7"},
- {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0e53cb83fdd61cbd67202735e6a6687a7b491c8742dfc39c9e01e80354956763"},
- {file = "propcache-0.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92fe151145a990c22cbccf9ae15cae8ae9eddabfc949a219c9f667877e40853d"},
- {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a21ef516d36909931a2967621eecb256018aeb11fc48656e3257e73e2e247a"},
- {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f88a4095e913f98988f5b338c1d4d5d07dbb0b6bad19892fd447484e483ba6b"},
- {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a5b3bb545ead161be780ee85a2b54fdf7092815995661947812dde94a40f6fb"},
- {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67aeb72e0f482709991aa91345a831d0b707d16b0257e8ef88a2ad246a7280bf"},
- {file = "propcache-0.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c997f8c44ec9b9b0bcbf2d422cc00a1d9b9c681f56efa6ca149a941e5560da2"},
- {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a66df3d4992bc1d725b9aa803e8c5a66c010c65c741ad901e260ece77f58d2f"},
- {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:3ebbcf2a07621f29638799828b8d8668c421bfb94c6cb04269130d8de4fb7136"},
- {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1235c01ddaa80da8235741e80815ce381c5267f96cc49b1477fdcf8c047ef325"},
- {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3947483a381259c06921612550867b37d22e1df6d6d7e8361264b6d037595f44"},
- {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d5bed7f9805cc29c780f3aee05de3262ee7ce1f47083cfe9f77471e9d6777e83"},
- {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4a91d44379f45f5e540971d41e4626dacd7f01004826a18cb048e7da7e96544"},
- {file = "propcache-0.2.0-cp313-cp313-win32.whl", hash = "sha256:f902804113e032e2cdf8c71015651c97af6418363bea8d78dc0911d56c335032"},
- {file = "propcache-0.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:8f188cfcc64fb1266f4684206c9de0e80f54622c3f22a910cbd200478aeae61e"},
- {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:53d1bd3f979ed529f0805dd35ddaca330f80a9a6d90bc0121d2ff398f8ed8861"},
- {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:83928404adf8fb3d26793665633ea79b7361efa0287dfbd372a7e74311d51ee6"},
- {file = "propcache-0.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77a86c261679ea5f3896ec060be9dc8e365788248cc1e049632a1be682442063"},
- {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218db2a3c297a3768c11a34812e63b3ac1c3234c3a086def9c0fee50d35add1f"},
- {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7735e82e3498c27bcb2d17cb65d62c14f1100b71723b68362872bca7d0913d90"},
- {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20a617c776f520c3875cf4511e0d1db847a076d720714ae35ffe0df3e440be68"},
- {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b69535c870670c9f9b14a75d28baa32221d06f6b6fa6f77a0a13c5a7b0a5b9"},
- {file = "propcache-0.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4569158070180c3855e9c0791c56be3ceeb192defa2cdf6a3f39e54319e56b89"},
- {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:db47514ffdbd91ccdc7e6f8407aac4ee94cc871b15b577c1c324236b013ddd04"},
- {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:2a60ad3e2553a74168d275a0ef35e8c0a965448ffbc3b300ab3a5bb9956c2162"},
- {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:662dd62358bdeaca0aee5761de8727cfd6861432e3bb828dc2a693aa0471a563"},
- {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:25a1f88b471b3bc911d18b935ecb7115dff3a192b6fef46f0bfaf71ff4f12418"},
- {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:f60f0ac7005b9f5a6091009b09a419ace1610e163fa5deaba5ce3484341840e7"},
- {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:74acd6e291f885678631b7ebc85d2d4aec458dd849b8c841b57ef04047833bed"},
- {file = "propcache-0.2.0-cp38-cp38-win32.whl", hash = "sha256:d9b6ddac6408194e934002a69bcaadbc88c10b5f38fb9307779d1c629181815d"},
- {file = "propcache-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:676135dcf3262c9c5081cc8f19ad55c8a64e3f7282a21266d05544450bffc3a5"},
- {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:25c8d773a62ce0451b020c7b29a35cfbc05de8b291163a7a0f3b7904f27253e6"},
- {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:375a12d7556d462dc64d70475a9ee5982465fbb3d2b364f16b86ba9135793638"},
- {file = "propcache-0.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1ec43d76b9677637a89d6ab86e1fef70d739217fefa208c65352ecf0282be957"},
- {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f45eec587dafd4b2d41ac189c2156461ebd0c1082d2fe7013571598abb8505d1"},
- {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc092ba439d91df90aea38168e11f75c655880c12782facf5cf9c00f3d42b562"},
- {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa1076244f54bb76e65e22cb6910365779d5c3d71d1f18b275f1dfc7b0d71b4d"},
- {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:682a7c79a2fbf40f5dbb1eb6bfe2cd865376deeac65acf9beb607505dced9e12"},
- {file = "propcache-0.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e40876731f99b6f3c897b66b803c9e1c07a989b366c6b5b475fafd1f7ba3fb8"},
- {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:363ea8cd3c5cb6679f1c2f5f1f9669587361c062e4899fce56758efa928728f8"},
- {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:140fbf08ab3588b3468932974a9331aff43c0ab8a2ec2c608b6d7d1756dbb6cb"},
- {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e70fac33e8b4ac63dfc4c956fd7d85a0b1139adcfc0d964ce288b7c527537fea"},
- {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b33d7a286c0dc1a15f5fc864cc48ae92a846df287ceac2dd499926c3801054a6"},
- {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f6d5749fdd33d90e34c2efb174c7e236829147a2713334d708746e94c4bde40d"},
- {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22aa8f2272d81d9317ff5756bb108021a056805ce63dd3630e27d042c8092798"},
- {file = "propcache-0.2.0-cp39-cp39-win32.whl", hash = "sha256:73e4b40ea0eda421b115248d7e79b59214411109a5bc47d0d48e4c73e3b8fcf9"},
- {file = "propcache-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:9517d5e9e0731957468c29dbfd0f976736a0e55afaea843726e887f36fe017df"},
- {file = "propcache-0.2.0-py3-none-any.whl", hash = "sha256:2ccc28197af5313706511fab3a8b66dcd6da067a1331372c82ea1cb74285e036"},
- {file = "propcache-0.2.0.tar.gz", hash = "sha256:df81779732feb9d01e5d513fad0122efb3d53bbc75f61b2a4f29a020bc985e70"},
-]
-
-[[package]]
-name = "protobuf"
-version = "5.28.2"
-description = ""
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "protobuf-5.28.2-cp310-abi3-win32.whl", hash = "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d"},
- {file = "protobuf-5.28.2-cp310-abi3-win_amd64.whl", hash = "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132"},
- {file = "protobuf-5.28.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7"},
- {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f"},
- {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f"},
- {file = "protobuf-5.28.2-cp38-cp38-win32.whl", hash = "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0"},
- {file = "protobuf-5.28.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3"},
- {file = "protobuf-5.28.2-cp39-cp39-win32.whl", hash = "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36"},
- {file = "protobuf-5.28.2-cp39-cp39-win_amd64.whl", hash = "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276"},
- {file = "protobuf-5.28.2-py3-none-any.whl", hash = "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece"},
- {file = "protobuf-5.28.2.tar.gz", hash = "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0"},
-]
-
-[[package]]
-name = "psutil"
-version = "6.0.0"
-description = "Cross-platform lib for process and system monitoring in Python."
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
-files = [
- {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"},
- {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"},
- {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"},
- {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"},
- {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"},
- {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"},
- {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"},
- {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"},
- {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"},
- {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"},
- {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"},
- {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"},
- {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"},
- {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"},
- {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"},
- {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"},
- {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"},
-]
-
-[package.extras]
-test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
-
-[[package]]
-name = "ptyprocess"
-version = "0.7.0"
-description = "Run a subprocess in a pseudo terminal"
-optional = true
-python-versions = "*"
-files = [
- {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
- {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
-]
-
-[[package]]
-name = "pure-eval"
-version = "0.2.3"
-description = "Safely evaluate AST nodes without side effects"
-optional = true
-python-versions = "*"
-files = [
- {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"},
- {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"},
-]
-
-[package.extras]
-tests = ["pytest"]
-
-[[package]]
-name = "pyarrow"
-version = "17.0.0"
-description = "Python library for Apache Arrow"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"},
- {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"},
- {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"},
- {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"},
- {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"},
- {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"},
- {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"},
- {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"},
- {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"},
- {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"},
- {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"},
- {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"},
- {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"},
- {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"},
- {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"},
- {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"},
- {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"},
- {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"},
- {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"},
- {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"},
- {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"},
- {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"},
- {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"},
- {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"},
- {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"},
- {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"},
- {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"},
- {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"},
- {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"},
- {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"},
- {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"},
- {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"},
- {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"},
- {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"},
- {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"},
- {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"},
-]
-
-[package.dependencies]
-numpy = ">=1.16.6"
-
-[package.extras]
-test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"]
-
-[[package]]
-name = "pyaudio"
-version = "0.2.14"
-description = "Cross-platform audio I/O with PortAudio"
-optional = true
-python-versions = "*"
-files = [
- {file = "PyAudio-0.2.14-cp310-cp310-win32.whl", hash = "sha256:126065b5e82a1c03ba16e7c0404d8f54e17368836e7d2d92427358ad44fefe61"},
- {file = "PyAudio-0.2.14-cp310-cp310-win_amd64.whl", hash = "sha256:2a166fc88d435a2779810dd2678354adc33499e9d4d7f937f28b20cc55893e83"},
- {file = "PyAudio-0.2.14-cp311-cp311-win32.whl", hash = "sha256:506b32a595f8693811682ab4b127602d404df7dfc453b499c91a80d0f7bad289"},
- {file = "PyAudio-0.2.14-cp311-cp311-win_amd64.whl", hash = "sha256:bbeb01d36a2f472ae5ee5e1451cacc42112986abe622f735bb870a5db77cf903"},
- {file = "PyAudio-0.2.14-cp312-cp312-win32.whl", hash = "sha256:5fce4bcdd2e0e8c063d835dbe2860dac46437506af509353c7f8114d4bacbd5b"},
- {file = "PyAudio-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:12f2f1ba04e06ff95d80700a78967897a489c05e093e3bffa05a84ed9c0a7fa3"},
- {file = "PyAudio-0.2.14-cp313-cp313-win32.whl", hash = "sha256:95328285b4dab57ea8c52a4a996cb52be6d629353315be5bfda403d15932a497"},
- {file = "PyAudio-0.2.14-cp313-cp313-win_amd64.whl", hash = "sha256:692d8c1446f52ed2662120bcd9ddcb5aa2b71f38bda31e58b19fb4672fffba69"},
- {file = "PyAudio-0.2.14-cp38-cp38-win32.whl", hash = "sha256:858caf35b05c26d8fc62f1efa2e8f53d5fa1a01164842bd622f70ddc41f55000"},
- {file = "PyAudio-0.2.14-cp38-cp38-win_amd64.whl", hash = "sha256:2dac0d6d675fe7e181ba88f2de88d321059b69abd52e3f4934a8878e03a7a074"},
- {file = "PyAudio-0.2.14-cp39-cp39-win32.whl", hash = "sha256:f745109634a7c19fa4d6b8b7d6967c3123d988c9ade0cd35d4295ee1acdb53e9"},
- {file = "PyAudio-0.2.14-cp39-cp39-win_amd64.whl", hash = "sha256:009f357ee5aa6bc8eb19d69921cd30e98c42cddd34210615d592a71d09c4bd57"},
- {file = "PyAudio-0.2.14.tar.gz", hash = "sha256:78dfff3879b4994d1f4fc6485646a57755c6ee3c19647a491f790a0895bd2f87"},
-]
-
-[package.extras]
-test = ["numpy"]
-
-[[package]]
-name = "pyav"
-version = "13.1.0"
-description = "Pythonic bindings for FFmpeg's libraries."
-optional = false
-python-versions = ">=3.10"
-files = [
- {file = "pyav-13.1.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:64a81022e60dfba7dee9767a6fd150f42293855ea127979b2f38a3fd86f908fd"},
- {file = "pyav-13.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3971089334cc91e331c5014c8ea5fcbca0ccc82eb14952c128ce50570010a3cf"},
- {file = "pyav-13.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:154394ba47b4b55d4abda3e66e2b0a79e7b046c983191cb6113ea14769eea53a"},
- {file = "pyav-13.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b48efcde03b9952ece3c8a8d9d74c685ff84ab91b60ea0ae6960638e30f3f31"},
- {file = "pyav-13.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:8404d5a5eef975862a35f2338ab8e7ae5d7a7f9af1ac748edef2aca4543f44cd"},
- {file = "pyav-13.1.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:a75d67dc80ea87f3987fafa5699410047af818b20691046c76d12e18faf3da68"},
- {file = "pyav-13.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4571175c8511d36128e94955b8cc64b0452e16da42c81ceae745946f88abf477"},
- {file = "pyav-13.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7e1bd1157b21ca116c71696be62cd12bcaefc32179fd99efad90e0a76d300d3"},
- {file = "pyav-13.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:126386f2f8a0c57487a3ad947ac573385d41326b5ff111783135cc56a8869261"},
- {file = "pyav-13.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:57d94282ffd445ab055c36e150fee1a4a066e0aee259260c82792dbd349ec08d"},
- {file = "pyav-13.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b2daf24cbc01ee666c4893e69aac8fc65bab598ea0029382857930f652a5e5ff"},
- {file = "pyav-13.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83e3a67e2038b8cfd1d5dd2d1a1756ac1143a4c223b1723e64ac8bdb2045fb6a"},
- {file = "pyav-13.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24de515886366b2c952e3827e7fb6466ad06f40b5cb34595a3f922899727be2b"},
- {file = "pyav-13.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66befb4172facfaaf7f3be94b1659051378b0741f087d5b46d2a25b6bce34b4f"},
- {file = "pyav-13.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a65d060fceee59e5a1dd70e64bf6ffca55fff2b596af906b206d8ba0057bbdc5"},
- {file = "pyav-13.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8450899845220a2a4f3ecc3eba0d5f864c169d98a9892be75447e59480162a09"},
- {file = "pyav-13.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6b21df5daadbb019c4612cc89923202ad7a4dd259be905eba56887a14a344861"},
- {file = "pyav-13.1.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21668b5ea9c4f046f61193a555d3deb2ca633b2ffb27a22a3b0eb03e8da64992"},
- {file = "pyav-13.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ae2413955b7d76826d214d3a5b719714f352de7de318e45275811fa07b9efe3"},
- {file = "pyav-13.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:a3ba8764bbf958e6c94b0dc7b07f670b4a759a157547a69cddc58eabba8aea1d"},
- {file = "pyav-13.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c92ef209e12660c6a75f81c9d228adc1e07294b875bf91d9b2a58c44a728b2d3"},
- {file = "pyav-13.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2e1855824313c17367c5ba658cf99d8b3169e0c3e0bdef5aa87a4c472c46d72b"},
- {file = "pyav-13.1.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c7a2eb79af1d3414509e31631a1b837b011eba4a21e311ae1308eca95a9f4db"},
- {file = "pyav-13.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69566d6b5438259e2e4adc2975591d513b7f1280fbf4ed3e0901be10a4567470"},
- {file = "pyav-13.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2960397dd97d7462effe8e6696557a91f24c2841edf391b0355734db8e4b02cd"},
- {file = "pyav-13.1.0.tar.gz", hash = "sha256:7049f4df6f94b4b727c1339a094f29c4178f3e0c290a01b9fcf0190a9890704c"},
-]
-
-[[package]]
-name = "pycollada"
-version = "0.8"
-description = "python library for reading and writing collada documents"
-optional = true
-python-versions = "*"
-files = [
- {file = "pycollada-0.8.tar.gz", hash = "sha256:f3a3759cc4cec1d59e932aad74399dbcf541d18862aad903c770040da42af20e"},
-]
-
-[package.dependencies]
-numpy = "*"
-python-dateutil = ">=2.2"
-
-[package.extras]
-prettyprint = ["lxml"]
-validation = ["lxml"]
-
-[[package]]
-name = "pycparser"
-version = "2.22"
-description = "C parser in Python"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
- {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
-]
-
-[[package]]
-name = "pygame"
-version = "2.6.1"
-description = "Python Game Development"
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "pygame-2.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9beeb647e555afb5657111fa83acb74b99ad88761108eaea66472e8b8547b55b"},
- {file = "pygame-2.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:10e3d2a55f001f6c0a6eb44aa79ea7607091c9352b946692acedb2ac1482f1c9"},
- {file = "pygame-2.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:816e85000c5d8b02a42b9834f761a5925ef3377d2924e3a7c4c143d2990ce5b8"},
- {file = "pygame-2.6.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a78fd030d98faab4a8e27878536fdff7518d3e062a72761c552f624ebba5a5f"},
- {file = "pygame-2.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da3ad64d685f84a34ebe5daacb39fff14f1251acb34c098d760d63fee768f50c"},
- {file = "pygame-2.6.1-cp310-cp310-win32.whl", hash = "sha256:9dd5c054d4bd875a8caf978b82672f02bec332f52a833a76899220c460bb4b58"},
- {file = "pygame-2.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:00827aba089355925902d533f9c41e79a799641f03746c50a374dc5c3362e43d"},
- {file = "pygame-2.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:20349195326a5e82a16e351ed93465a7845a7e2a9af55b7bc1b2110ea3e344e1"},
- {file = "pygame-2.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f3935459109da4bb0b3901da9904f0a3e52028a3332a355d298b1673a334cf21"},
- {file = "pygame-2.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c31dbdb5d0217f32764797d21c2752e258e5fb7e895326538d82b5f75a0cd856"},
- {file = "pygame-2.6.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:173badf82fa198e6888017bea40f511cb28e69ecdd5a72b214e81e4dcd66c3b1"},
- {file = "pygame-2.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce8cc108b92de9b149b344ad2e25eedbe773af0dc41dfb24d1f07f679b558c60"},
- {file = "pygame-2.6.1-cp311-cp311-win32.whl", hash = "sha256:811e7b925146d8149d79193652cbb83e0eca0aae66476b1cb310f0f4226b8b5c"},
- {file = "pygame-2.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:91476902426facd4bb0dad4dc3b2573bc82c95c71b135e0daaea072ed528d299"},
- {file = "pygame-2.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4ee7f2771f588c966fa2fa8b829be26698c9b4836f82ede5e4edc1a68594942e"},
- {file = "pygame-2.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c8040ea2ab18c6b255af706ec01355c8a6b08dc48d77fd4ee783f8fc46a843bf"},
- {file = "pygame-2.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47a6938de93fa610accd4969e638c2aebcb29b2fca518a84c3a39d91ab47116"},
- {file = "pygame-2.6.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33006f784e1c7d7e466fcb61d5489da59cc5f7eb098712f792a225df1d4e229d"},
- {file = "pygame-2.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1206125f14cae22c44565c9d333607f1d9f59487b1f1432945dfc809aeaa3e88"},
- {file = "pygame-2.6.1-cp312-cp312-win32.whl", hash = "sha256:84fc4054e25262140d09d39e094f6880d730199710829902f0d8ceae0213379e"},
- {file = "pygame-2.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:3a9e7396be0d9633831c3f8d5d82dd63ba373ad65599628294b7a4f8a5a01a65"},
- {file = "pygame-2.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae6039f3a55d800db80e8010f387557b528d34d534435e0871326804df2a62f2"},
- {file = "pygame-2.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2a3a1288e2e9b1e5834e425bedd5ba01a3cd4902b5c2bff8ed4a740ccfe98171"},
- {file = "pygame-2.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27eb17e3dc9640e4b4683074f1890e2e879827447770470c2aba9f125f74510b"},
- {file = "pygame-2.6.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c1623180e70a03c4a734deb9bac50fc9c82942ae84a3a220779062128e75f3b"},
- {file = "pygame-2.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef07c0103d79492c21fced9ad68c11c32efa6801ca1920ebfd0f15fb46c78b1c"},
- {file = "pygame-2.6.1-cp313-cp313-win32.whl", hash = "sha256:3acd8c009317190c2bfd81db681ecef47d5eb108c2151d09596d9c7ea9df5c0e"},
- {file = "pygame-2.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:813af4fba5d0b2cb8e58f5d95f7910295c34067dcc290d34f1be59c48bd1ea6a"},
- {file = "pygame-2.6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:56ffca6059b165bbf64f4b4be23b8068f6a0e220780e4f96ec0bb5ac3c63ec39"},
- {file = "pygame-2.6.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bede70ec708057e305815d6546012669226d1d80566785feca9b044216062e7"},
- {file = "pygame-2.6.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f84f15d146d6aa93254008a626c56ef96fed276006202881a47b29757f0cd65a"},
- {file = "pygame-2.6.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14f9dda45469b254c0f15edaaeaa85d2cc072ff6a83584a265f5d684c7f7efd8"},
- {file = "pygame-2.6.1-cp36-cp36m-win32.whl", hash = "sha256:28b43190436037e428a5be28fc80cf6615304fd528009f2c688cc828f4ff104b"},
- {file = "pygame-2.6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:a4b8f04fceddd9a3ac30778d11f0254f59efcd1c382d5801271113cea8b4f2f3"},
- {file = "pygame-2.6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a620883d589926f157b8f1d1f543183ac52e5c30507dea445e3927ae0bee1c54"},
- {file = "pygame-2.6.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b46e68cd168f44d0224c670bb72186688fc692d7079715f79d04096757d703d0"},
- {file = "pygame-2.6.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0b11356ac96261162d54a2c2b41a41978f00525631b01ec9c4fe26b01c66595"},
- {file = "pygame-2.6.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:325a84d072d52e3c2921eff02f87c6a74b7e77d71db3bdf53801c6c975f1b6c4"},
- {file = "pygame-2.6.1-cp37-cp37m-win32.whl", hash = "sha256:2a615d78b2364e86f541458ff41c2a46181b9a1e9eabd97b389282fdf04efbb3"},
- {file = "pygame-2.6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:94afd1177680d92f9214c54966ad3517d18210c4fbc5d84a0192d218e93647e0"},
- {file = "pygame-2.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97ac4e13847b6b293ecaffa5ffce9886c98d09c03309406931cc592f0cea6366"},
- {file = "pygame-2.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d1a7f2b66ac2e4c9583b6d4c6d6f346fb10a3392c04163f537061f86a448ed5c"},
- {file = "pygame-2.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac3f033d2be4a9e23660a96afe2986df3a6916227538a6a0061bc218c5088507"},
- {file = "pygame-2.6.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1bf7ab5311bbced70320f1a56701650b4c18231343ae5af42111eea91e0949a"},
- {file = "pygame-2.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21160d9093533eb831f1b708e630706e5ac16b30750571ec27bc3b8364814f38"},
- {file = "pygame-2.6.1-cp38-cp38-win32.whl", hash = "sha256:7bffdd3eaf394d9645331d1c3a5df9d782ebcc3c5a78f3b657c7879a828dd111"},
- {file = "pygame-2.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:818b4eaec9c4acb6ac64805d4ca8edd4062bebca77bd815c18739fe2842c97e9"},
- {file = "pygame-2.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15efaa11a80a65dd589a95bebe812fa5bfc7e14946b638a424c5bd9ac6cca1a4"},
- {file = "pygame-2.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:481cfe1bdbb7fe00acc5950c494c26f00240888619bdc396fc8c39a734797432"},
- {file = "pygame-2.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d09fd950725d187aa5207c0cb8eb9ab0d2f8ce9ab8d189c30eeb470e71b617e"},
- {file = "pygame-2.6.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:163e66de169bd5670c86e27d0b74aad0d2d745e3b63cf4e7eb5b2bff1231ca8d"},
- {file = "pygame-2.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb6e8d0547f30ddc845f4fd1e33070ef548233ad0dbf21f7ecea768883d1bbdc"},
- {file = "pygame-2.6.1-cp39-cp39-win32.whl", hash = "sha256:d29eb9a93f12aa3d997b6e3c447ac85b2a4b142ab2548441523a8fcf5e216042"},
- {file = "pygame-2.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:6582aa71a681e02e55d43150a9ab41394e6bf4d783d2962a10aea58f424be060"},
- {file = "pygame-2.6.1-pp36-pypy36_pp73-win32.whl", hash = "sha256:4a8ea113b1bf627322a025a1a5a87e3818a7f55ab3a4077ff1ae5c8c60576614"},
- {file = "pygame-2.6.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b7f9f8e6f76de36f4725175d686601214af362a4f30614b4dae2240198e72e6f"},
- {file = "pygame-2.6.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:bbb7167c92103a2091366e9af26d4914ba3776666e8677d3c93551353fffa626"},
- {file = "pygame-2.6.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17498a2b043bc0e795faedef1b081199c688890200aef34991c1941caa2d2c89"},
- {file = "pygame-2.6.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7103c60939bbc1e05cfc7ba3f1d2ad3bbf103b7828b82a7166a9ab6f51950146"},
- {file = "pygame-2.6.1.tar.gz", hash = "sha256:56fb02ead529cee00d415c3e007f75e0780c655909aaa8e8bf616ee09c9feb1f"},
-]
-
-[[package]]
-name = "pyglet"
-version = "2.0.18"
-description = "pyglet is a cross-platform games and multimedia package."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pyglet-2.0.18-py3-none-any.whl", hash = "sha256:e592952ae0297e456c587b6486ed8c3e5f9d0c3519d517bb92dde5fdf4c26b41"},
- {file = "pyglet-2.0.18.tar.gz", hash = "sha256:7cf9238d70082a2da282759679f8a011cc979753a32224a8ead8ed80e48f99dc"},
-]
-
-[[package]]
-name = "pygments"
-version = "2.18.0"
-description = "Pygments is a syntax highlighting package written in Python."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"},
- {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"},
-]
-
-[package.extras]
-windows-terminal = ["colorama (>=0.4.6)"]
-
-[[package]]
-name = "pymunk"
-version = "6.8.1"
-description = "Pymunk is a easy-to-use pythonic 2D physics library"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "pymunk-6.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4203cb73ab1ecffbe6ff2c903542987828eec204acb012eba41592303a63a85c"},
- {file = "pymunk-6.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aff5d00f05f78ab98f3cb699ba417db1eca1fe07ac88cb0f70a850d1f06d94bb"},
- {file = "pymunk-6.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c9357179aba3501d250ce8a8b62ad59968c0e9be4ea330a31aab70d4907b5fd"},
- {file = "pymunk-6.8.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c689886456f53554e8961bd4d3addc0bbe25999afa55c2990d59543dd6ad1bc"},
- {file = "pymunk-6.8.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:94cc4322041fa6ba429dee897e9d269339cd6fa15ea5b46783b7f67ccf31c8f4"},
- {file = "pymunk-6.8.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:acd808e4c9596d521308816eacc077eff758255713b156dad6d7d666f98d71ac"},
- {file = "pymunk-6.8.1-cp310-cp310-win32.whl", hash = "sha256:c17fd57e40fc2cfd63bef693a8a90cc9a033665ecebbd0cd989482bb188857ed"},
- {file = "pymunk-6.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:655722a1d907ab66c2a5aaffd469cd997aa79f02860dd974e7475783945bd1a0"},
- {file = "pymunk-6.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8a6d1615c7665dabd33768e40b81eaf1bbce65d36d54f0cc17d415aa1d98f249"},
- {file = "pymunk-6.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a0980cc537ef149af688cd0dbf0e447f924eb05818e9cb92c7950342e5eba7ce"},
- {file = "pymunk-6.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d353ffc0396115ebcd8a006fc97106d3b3f91adc842fa0f451c872cdbb21128"},
- {file = "pymunk-6.8.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e6bd6600628ad87b56645ee0ffc24d6623f2c941b5c4aa5058722ab17335d80"},
- {file = "pymunk-6.8.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6e66d22814cccada953d126dadc0578dca0e6eb39431523e533701d3ba4c3fac"},
- {file = "pymunk-6.8.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:84bc02b5512cf60c38cf9c9ecff24eb733437a28de20f4b362a1c27400e23ed3"},
- {file = "pymunk-6.8.1-cp311-cp311-win32.whl", hash = "sha256:64ea1acf1c2f29a03c2121db39107253d67c4f800e8c472c2a599808103b5e99"},
- {file = "pymunk-6.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ef1a82fb68dd3f2e3351591cbf594fce4e49a80271ebb7af643d41a53d95a23"},
- {file = "pymunk-6.8.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c2961a239430f6326932f51274746fd5236d6b274d7b2b84fd44b9c2a73a888b"},
- {file = "pymunk-6.8.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb1ae0194393165c57974c8edc2cfda144a9b20dd2906cb38ec22dfb65c7fa88"},
- {file = "pymunk-6.8.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8320917fc7ed750ccc22a8f67a5b5a375b31c5492d491ef3783e87537887a4"},
- {file = "pymunk-6.8.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:704da8ff41eb06d4987003a8a0f2ef603dde9bf224bf6f85356f6dace5e23ac5"},
- {file = "pymunk-6.8.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6e5c4e596cf9d715cae26330a464335f39ca25a61a923f14eaac720b82b6a6be"},
- {file = "pymunk-6.8.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9d4b9c31cea1dc75719cb3ae45505034ed51bde92799b598f8fb234b06dac33a"},
- {file = "pymunk-6.8.1-cp312-cp312-win32.whl", hash = "sha256:d7e12be02ed37e323598ccae914386f55eb20d5ee08c013df1b43141ef8a6b56"},
- {file = "pymunk-6.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:e510666591d6cef0ab5f7752f2796d244c3131980a35106a6f3a0c03c34a378c"},
- {file = "pymunk-6.8.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:abbf77226e4e8a7e9140ae452b764954c071ba62031e3db9ea65549c9f88e495"},
- {file = "pymunk-6.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:250fdd54cc8dc3f0ebb67b41e8caf1400ce3ca882c513540a7925dac5cec6392"},
- {file = "pymunk-6.8.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:279b3896a79c8701a69a5cefdcb7c98c4bc50687208e488f90c50bd5014e9871"},
- {file = "pymunk-6.8.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:2a844c7be3cb39da5dc546e30158f6f971fc509e63b3937c34be5a54b9d6457d"},
- {file = "pymunk-6.8.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:e2a1480d9fa6db0ff3815a4b70a499be7992a648a4fcd3883677ec52b6e0d1cd"},
- {file = "pymunk-6.8.1-cp37-cp37m-win32.whl", hash = "sha256:da606cd607ea0bed9e98c7bf414f588feb5decf66530a60369e095ac7a9d0c14"},
- {file = "pymunk-6.8.1-cp37-cp37m-win_amd64.whl", hash = "sha256:add65195ebc7e9b94b1aaaca16064f4290094ead3971d0b53294a9531e39e96f"},
- {file = "pymunk-6.8.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:821eaa9b843e385315f1981fc97a352dc20bdc65c645e852bd11382764bad860"},
- {file = "pymunk-6.8.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:05ac390f59a0ae385af7912b97c5a5240694a3c3b2775e3975abbfdafdb7edc4"},
- {file = "pymunk-6.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59f4b0b8af9ebdebbd2831452d1ebcd29cf0ae2a7968c24d40c90fdcef8746d9"},
- {file = "pymunk-6.8.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:afce20a8eea3da231a366329b513172b60009615dab4ebcc3c2a3f320ec33306"},
- {file = "pymunk-6.8.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7b6d8bec17f4ce008eced7c46fdc80aa563ec5a9b383471031e7a94dece5eb8d"},
- {file = "pymunk-6.8.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:00f66f6232263e80432fa34a3cd05065f6a1c9c2a29b50a26938a11b8a2bf6eb"},
- {file = "pymunk-6.8.1-cp38-cp38-win32.whl", hash = "sha256:19c2589a53ad97ce08473b69b76c5af6fb24b90f1da734ccfaaae21f50616095"},
- {file = "pymunk-6.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:6de1d49b6976ea46589caba9275165b85adbdcd93d744ae5e30ddce853f54502"},
- {file = "pymunk-6.8.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:59c333e03f1d772c6872b973629b1b780b117744e3ef74badbb02c2ecd4cd28d"},
- {file = "pymunk-6.8.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:923c53ec3306b3cea4d9f5bc9beefb668e4318187057d3a89c470fa88a912bc1"},
- {file = "pymunk-6.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcb5fc5ae53a78aadc123750d9341ec3e99539a9f3ba7d2fca70ecfc690272f8"},
- {file = "pymunk-6.8.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f18c715c837cfc1c76f2865a79cee72e934c6bb3197042d328e6af9cda2e85f"},
- {file = "pymunk-6.8.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:9226ac3008d47e55c8cef3464b0f257ff1e613baac46b8adebbf832421ba008d"},
- {file = "pymunk-6.8.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e8b00579859194250fe574828d5f83907fa71af3fa1f5eb6c9887343feb2e7f"},
- {file = "pymunk-6.8.1-cp39-cp39-win32.whl", hash = "sha256:96ac4537c23fe5310339ef8dec82a923a0540ab16668ece2ece26cb8986a4358"},
- {file = "pymunk-6.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:e2b7f83b2d1fc8e6b2b01f8627e52bc84a10758c53a58df7a932958d9593de71"},
- {file = "pymunk-6.8.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8aa4fc263155f9f515b371a8d428f1769cdcebe0e772a26990c8a2ba2e6240f2"},
- {file = "pymunk-6.8.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:67ba6be15d3d06b49f7693d3ad3f271638e4991791edf968a292de0185f3a25d"},
- {file = "pymunk-6.8.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85624fab1f23a757a689d3c4404ba8629eeccbb0d37cd937d6d381e47f9e8bc3"},
- {file = "pymunk-6.8.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c12c542e2ff8a953b390473dea02dc84e5d40d7b71f87dd36ce9ab242718444"},
- {file = "pymunk-6.8.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4eadb3f96abcb36c570a0e560d81cdafd2bc4b5b89f1230e42ec7581405ab643"},
- {file = "pymunk-6.8.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:66c0a56c5c816519c917f996361f3fb673d3ebccaad8c5d4d14764629a14e906"},
- {file = "pymunk-6.8.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:90cd20090aab3001e400406aa782dcfc798adb949e98fcd84182d108da050c00"},
- {file = "pymunk-6.8.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c58fc4fcfc1ea988596bc1198270ccd255572a95d929b43a1fc40424cb7a7d64"},
- {file = "pymunk-6.8.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e94c41a41c1019bec97db6624ae793fb2dceb68f668a80c74c7def3d80286f3f"},
- {file = "pymunk-6.8.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2900fed03f8a4cccbb94c640bdadb1230f0cf3f9667b775df7bd563bbd0c6231"},
- {file = "pymunk-6.8.1.tar.gz", hash = "sha256:e04061e9d05c6163b83bb15a39b595da199bb5f500b06acb5f2d50f3d61ef429"},
-]
-
-[package.dependencies]
-cffi = ">=1.15.0"
-
-[package.extras]
-dev = ["aafigure", "matplotlib", "numpy", "pygame", "pyglet (<2.0.0)", "sphinx", "wheel"]
-
-[[package]]
-name = "pynput"
-version = "1.7.7"
-description = "Monitor and control user input devices"
-optional = true
-python-versions = "*"
-files = [
- {file = "pynput-1.7.7-py2.py3-none-any.whl", hash = "sha256:afc43f651684c98818de048abc76adf9f2d3d797083cb07c1f82be764a2d44cb"},
-]
-
-[package.dependencies]
-evdev = {version = ">=1.3", markers = "sys_platform in \"linux\""}
-pyobjc-framework-ApplicationServices = {version = ">=8.0", markers = "sys_platform == \"darwin\""}
-pyobjc-framework-Quartz = {version = ">=8.0", markers = "sys_platform == \"darwin\""}
-python-xlib = {version = ">=0.17", markers = "sys_platform in \"linux\""}
-six = "*"
-
-[[package]]
-name = "pyobjc-core"
-version = "10.3.1"
-description = "Python<->ObjC Interoperability Module"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pyobjc_core-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ea46d2cda17921e417085ac6286d43ae448113158afcf39e0abe484c58fb3d78"},
- {file = "pyobjc_core-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:899d3c84d2933d292c808f385dc881a140cf08632907845043a333a9d7c899f9"},
- {file = "pyobjc_core-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:6ff5823d13d0a534cdc17fa4ad47cf5bee4846ce0fd27fc40012e12b46db571b"},
- {file = "pyobjc_core-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2581e8e68885bcb0e11ec619e81ef28e08ee3fac4de20d8cc83bc5af5bcf4a90"},
- {file = "pyobjc_core-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea98d4c2ec39ca29e62e0327db21418696161fb138ee6278daf2acbedf7ce504"},
- {file = "pyobjc_core-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:4c179c26ee2123d0aabffb9dbc60324b62b6f8614fb2c2328b09386ef59ef6d8"},
- {file = "pyobjc_core-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cb901fce65c9be420c40d8a6ee6fff5ff27c6945f44fd7191989b982baa66dea"},
- {file = "pyobjc_core-10.3.1.tar.gz", hash = "sha256:b204a80ccc070f9ab3f8af423a3a25a6fd787e228508d00c4c30f8ac538ba720"},
-]
-
-[[package]]
-name = "pyobjc-framework-applicationservices"
-version = "10.3.1"
-description = "Wrappers for the framework ApplicationServices on macOS"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pyobjc_framework_ApplicationServices-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b694260d423c470cb90c3a7009cfde93e332ea6fb4b9b9526ad3acbd33460e3d"},
- {file = "pyobjc_framework_ApplicationServices-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d886ba1f65df47b77ff7546f3fc9bc7d08cfb6b3c04433b719f6b0689a2c0d1f"},
- {file = "pyobjc_framework_ApplicationServices-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:be157f2c3ffb254064ef38249670af8cada5e519a714d2aa5da3740934d89bc8"},
- {file = "pyobjc_framework_ApplicationServices-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:57737f41731661e4a3b78793ec9173f61242a32fa560c3e4e58484465d049c32"},
- {file = "pyobjc_framework_ApplicationServices-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c429eca69ee675e781e4e55f79e939196b47f02560ad865b1ba9ac753b90bd77"},
- {file = "pyobjc_framework_ApplicationServices-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:4f1814a17041a20adca454044080b52e39a4ebc567ad2c6a48866dd4beaa192a"},
- {file = "pyobjc_framework_ApplicationServices-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1252f1137f83eb2c6b9968d8c591363e8859dd2484bc9441d8f365bcfb43a0e4"},
- {file = "pyobjc_framework_applicationservices-10.3.1.tar.gz", hash = "sha256:f27cb64aa4d129ce671fd42638c985eb2a56d544214a95fe3214a007eacc4790"},
-]
-
-[package.dependencies]
-pyobjc-core = ">=10.3.1"
-pyobjc-framework-Cocoa = ">=10.3.1"
-pyobjc-framework-CoreText = ">=10.3.1"
-pyobjc-framework-Quartz = ">=10.3.1"
-
-[[package]]
-name = "pyobjc-framework-cocoa"
-version = "10.3.1"
-description = "Wrappers for the Cocoa frameworks on macOS"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pyobjc_framework_Cocoa-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4cb4f8491ab4d9b59f5187e42383f819f7a46306a4fa25b84f126776305291d1"},
- {file = "pyobjc_framework_Cocoa-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5f31021f4f8fdf873b57a97ee1f3c1620dbe285e0b4eaed73dd0005eb72fd773"},
- {file = "pyobjc_framework_Cocoa-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11b4e0bad4bbb44a4edda128612f03cdeab38644bbf174de0c13129715497296"},
- {file = "pyobjc_framework_Cocoa-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:de5e62e5ccf2871a94acf3bf79646b20ea893cc9db78afa8d1fe1b0d0f7cbdb0"},
- {file = "pyobjc_framework_Cocoa-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c5af24610ab639bd1f521ce4500484b40787f898f691b7a23da3339e6bc8b90"},
- {file = "pyobjc_framework_Cocoa-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:a7151186bb7805deea434fae9a4423335e6371d105f29e73cc2036c6779a9dbc"},
- {file = "pyobjc_framework_Cocoa-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:743d2a1ac08027fd09eab65814c79002a1d0421d7c0074ffd1217b6560889744"},
- {file = "pyobjc_framework_cocoa-10.3.1.tar.gz", hash = "sha256:1cf20714daaa986b488fb62d69713049f635c9d41a60c8da97d835710445281a"},
-]
-
-[package.dependencies]
-pyobjc-core = ">=10.3.1"
-
-[[package]]
-name = "pyobjc-framework-coretext"
-version = "10.3.1"
-description = "Wrappers for the framework CoreText on macOS"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pyobjc_framework_CoreText-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd6123cfccc38e32be884d1a13fb62bd636ecb192b9e8ae2b8011c977dec229e"},
- {file = "pyobjc_framework_CoreText-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:834142a14235bd80edaef8d3a28d1e203ed3c988810a9b78005df7c561390288"},
- {file = "pyobjc_framework_CoreText-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ae6c09d29eeaf30a67aa70e08a465b1f1e47d12e22b3a34ae8bc8fdb7e2e7342"},
- {file = "pyobjc_framework_CoreText-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:51ca95df1db9401366f11a7467f64be57f9a0630d31c357237d4062df0216938"},
- {file = "pyobjc_framework_CoreText-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8b75bdc267945b3f33c937c108d79405baf9d7c4cd530f922e5df243082a5031"},
- {file = "pyobjc_framework_CoreText-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:029b24c338f58fc32a004256d8559507e4f366dfe4eb09d3144273d536012d90"},
- {file = "pyobjc_framework_CoreText-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:418a55047dbff999fcd2b78cca167c4105587020b6c51567cfa28993bbfdc8ed"},
- {file = "pyobjc_framework_coretext-10.3.1.tar.gz", hash = "sha256:b8fa2d5078ed774431ae64ba886156e319aec0b8c6cc23dabfd86778265b416f"},
-]
-
-[package.dependencies]
-pyobjc-core = ">=10.3.1"
-pyobjc-framework-Cocoa = ">=10.3.1"
-pyobjc-framework-Quartz = ">=10.3.1"
-
-[[package]]
-name = "pyobjc-framework-quartz"
-version = "10.3.1"
-description = "Wrappers for the Quartz frameworks on macOS"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pyobjc_framework_Quartz-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ef4fd315ed2bc42ef77fdeb2bae28a88ec986bd7b8079a87ba3b3475348f96e"},
- {file = "pyobjc_framework_Quartz-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:96578d4a3e70164efe44ad7dc320ecd4e211758ffcde5dcd694de1bbdfe090a4"},
- {file = "pyobjc_framework_Quartz-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ca35f92486869a41847a1703bb176aab8a53dbfd8e678d1f4d68d8e6e1581c71"},
- {file = "pyobjc_framework_Quartz-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:00a0933267e3a46ea4afcc35d117b2efb920f06de797fa66279c52e7057e3590"},
- {file = "pyobjc_framework_Quartz-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a161bedb4c5257a02ad56a910cd7eefb28bdb0ea78607df0d70ed4efe4ea54c1"},
- {file = "pyobjc_framework_Quartz-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:d7a8028e117a94923a511944bfa9daf9744e212f06cf89010c60934a479863a5"},
- {file = "pyobjc_framework_Quartz-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:de00c983b3267eb26fa42c6ed9f15e2bf006bde8afa7fe2b390646aa21a5d6fc"},
- {file = "pyobjc_framework_quartz-10.3.1.tar.gz", hash = "sha256:b6d7e346d735c9a7f147cd78e6da79eeae416a0b7d3874644c83a23786c6f886"},
-]
-
-[package.dependencies]
-pyobjc-core = ">=10.3.1"
-pyobjc-framework-Cocoa = ">=10.3.1"
-
-[[package]]
-name = "pyopengl"
-version = "3.1.7"
-description = "Standard OpenGL bindings for Python"
-optional = true
-python-versions = "*"
-files = [
- {file = "PyOpenGL-3.1.7-py3-none-any.whl", hash = "sha256:a6ab19cf290df6101aaf7470843a9c46207789855746399d0af92521a0a92b7a"},
- {file = "PyOpenGL-3.1.7.tar.gz", hash = "sha256:eef31a3888e6984fd4d8e6c9961b184c9813ca82604d37fe3da80eb000a76c86"},
-]
-
-[[package]]
-name = "pyparsing"
-version = "3.1.4"
-description = "pyparsing module - Classes and methods to define and execute parsing grammars"
-optional = true
-python-versions = ">=3.6.8"
-files = [
- {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"},
- {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"},
-]
-
-[package.extras]
-diagrams = ["jinja2", "railroad-diagrams"]
-
-[[package]]
-name = "pyrealsense2"
-version = "2.55.1.6486"
-description = "Python Wrapper for Intel Realsense SDK 2.0."
-optional = true
-python-versions = "*"
-files = [
- {file = "pyrealsense2-2.55.1.6486-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:882613808289c602b23f2e19bf1fbadd63fb3af9be9c2997cc4ea74741a65136"},
- {file = "pyrealsense2-2.55.1.6486-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:686320811ef30c162c7240cb619e9b152420c0a32337a137139276c87f213336"},
- {file = "pyrealsense2-2.55.1.6486-cp310-cp310-win_amd64.whl", hash = "sha256:600e7c691c7c50043a2c930471f873da33badce9c5b8c75a8bad499389ac10a4"},
- {file = "pyrealsense2-2.55.1.6486-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:178f92caec6e8e212a7ced66c678dec47462b0b77e929fa576d02eea297bb177"},
- {file = "pyrealsense2-2.55.1.6486-cp311-cp311-win_amd64.whl", hash = "sha256:e01939a63bac3e1a4da742f7e1dbc618a4ec03ee0f7b3690ae5d1ad0c983aca8"},
- {file = "pyrealsense2-2.55.1.6486-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5a40216386b49a520b5817afe97efa9a53471747b765e8b4e6ca549678945c04"},
- {file = "pyrealsense2-2.55.1.6486-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:affb80b0cad7a732db4e450e9d4a8f7193499e3b35c0ce0b3e67fde5b1e9cf64"},
- {file = "pyrealsense2-2.55.1.6486-cp37-cp37m-win_amd64.whl", hash = "sha256:93f81f0955037a325529d93059138e5036fc5e51d5fda4b3c88ae4287fa0b3ed"},
- {file = "pyrealsense2-2.55.1.6486-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0f51350ea100215dedc757ea7872ec23342a1d84015e87583911912d882c8ce2"},
- {file = "pyrealsense2-2.55.1.6486-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3695ae04d423d6404db4d0a756c66a0f122f9e0858c91d3dcee132adbef35b62"},
- {file = "pyrealsense2-2.55.1.6486-cp38-cp38-win_amd64.whl", hash = "sha256:f06ea7adadcdcb7d3334b8f067e4f7a361f6421a763988897d52602937c716de"},
- {file = "pyrealsense2-2.55.1.6486-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:18b5650b1ffcc2c2a42c9f72870d291509afc5819db757f5f365c42a8aae4129"},
- {file = "pyrealsense2-2.55.1.6486-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:aac4fb7f9a36ff6ecaa3bf0565f3baa9327b02dd843f14933eece8a4455c6c79"},
- {file = "pyrealsense2-2.55.1.6486-cp39-cp39-win_amd64.whl", hash = "sha256:5cbede3cd35946f3051ae6df42619ea01419c58379533c596bbad5dbf648c25b"},
-]
-
-[[package]]
-name = "pyrender"
-version = "0.1.45"
-description = "Easy-to-use Python renderer for 3D visualization"
-optional = true
-python-versions = "*"
-files = []
-develop = false
-
-[package.dependencies]
-freetype-py = "*"
-imageio = "*"
-networkx = "*"
-numpy = "*"
-Pillow = "*"
-pyglet = ">=1.4.10"
-PyOpenGL = ">=3.1.0,<3.2.0"
-scipy = "*"
-six = "*"
-trimesh = "*"
-
-[package.extras]
-dev = ["flake8", "pre-commit", "pytest", "pytest-cov", "tox"]
-docs = ["sphinx", "sphinx-automodapi", "sphinx-rtd-theme"]
-
-[package.source]
-type = "git"
-url = "https://github.com/mmatl/pyrender.git"
-reference = "HEAD"
-resolved_reference = "a59963ef890891656fd17c90e12d663233dcaa99"
-
-[[package]]
-name = "pyribbit"
-version = "0.1.46"
-description = "Easy-to-use Python renderer for 3D visualization"
-optional = true
-python-versions = "*"
-files = [
- {file = "pyribbit-0.1.46-py3-none-any.whl", hash = "sha256:0d4943f7cc6903f20ef42787e9357d7bb25c95f2c04da9dfa1a8021bdf9e0ab6"},
- {file = "pyribbit-0.1.46.tar.gz", hash = "sha256:3bb7a31841549ed74c50e31415738d2494b720df825cf387501f17102299940b"},
-]
-
-[package.dependencies]
-freetype-py = "*"
-imageio = "*"
-networkx = "*"
-numpy = "*"
-Pillow = "*"
-pyglet = ">=1.4.10"
-PyOpenGL = ">=3.1.0"
-scipy = "*"
-six = "*"
-trimesh = "*"
-
-[package.extras]
-dev = ["flake8", "pre-commit", "pytest", "pytest-cov", "tox"]
-docs = ["sphinx", "sphinx-automodapi", "sphinx-rtd-theme"]
-
-[[package]]
-name = "pyserial"
-version = "3.5"
-description = "Python Serial Port Extension"
-optional = true
-python-versions = "*"
-files = [
- {file = "pyserial-3.5-py2.py3-none-any.whl", hash = "sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0"},
- {file = "pyserial-3.5.tar.gz", hash = "sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb"},
-]
-
-[package.extras]
-cp2110 = ["hidapi"]
-
-[[package]]
-name = "pysocks"
-version = "1.7.1"
-description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information."
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-files = [
- {file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"},
- {file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"},
- {file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"},
-]
-
-[[package]]
-name = "pytest"
-version = "8.3.3"
-description = "pytest: simple powerful testing with Python"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"},
- {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"},
-]
-
-[package.dependencies]
-colorama = {version = "*", markers = "sys_platform == \"win32\""}
-exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
-iniconfig = "*"
-packaging = "*"
-pluggy = ">=1.5,<2"
-tomli = {version = ">=1", markers = "python_version < \"3.11\""}
-
-[package.extras]
-dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
-
-[[package]]
-name = "pytest-cov"
-version = "5.0.0"
-description = "Pytest plugin for measuring coverage."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"},
- {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"},
-]
-
-[package.dependencies]
-coverage = {version = ">=5.2.1", extras = ["toml"]}
-pytest = ">=4.6"
-
-[package.extras]
-testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
-
-[[package]]
-name = "python-dateutil"
-version = "2.9.0.post0"
-description = "Extensions to the standard Python datetime module"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
-files = [
- {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
- {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
-]
-
-[package.dependencies]
-six = ">=1.5"
-
-[[package]]
-name = "python-json-logger"
-version = "2.0.7"
-description = "A python library adding a json log formatter"
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"},
- {file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"},
-]
-
-[[package]]
-name = "python-utils"
-version = "3.9.0"
-description = "Python Utils is a module with some convenient utilities not included with the standard Python install"
-optional = true
-python-versions = ">3.9.0"
-files = [
- {file = "python_utils-3.9.0-py2.py3-none-any.whl", hash = "sha256:a7719a5ef4bae7360d2a15c13b08c4e3c3e39b9df19bd16f119ff8d0cfeaafb7"},
- {file = "python_utils-3.9.0.tar.gz", hash = "sha256:3689556884e3ae53aec5a4c9f17b36e752a3e93a7ba2768c6553fc4dd6fa70ef"},
-]
-
-[package.dependencies]
-typing-extensions = ">3.10.0.2"
-
-[package.extras]
-docs = ["mock", "python-utils", "sphinx"]
-loguru = ["loguru"]
-tests = ["blessings", "loguru", "loguru-mypy", "mypy-ipython", "pyright", "pytest", "pytest-asyncio", "pytest-cov", "pytest-mypy", "ruff", "sphinx", "types-setuptools"]
-
-[[package]]
-name = "python-xlib"
-version = "0.33"
-description = "Python X Library"
-optional = true
-python-versions = "*"
-files = [
- {file = "python-xlib-0.33.tar.gz", hash = "sha256:55af7906a2c75ce6cb280a584776080602444f75815a7aff4d287bb2d7018b32"},
- {file = "python_xlib-0.33-py2.py3-none-any.whl", hash = "sha256:c3534038d42e0df2f1392a1b30a15a4ff5fdc2b86cfa94f072bf11b10a164398"},
-]
-
-[package.dependencies]
-six = ">=1.10.0"
-
-[[package]]
-name = "pytz"
-version = "2024.2"
-description = "World timezone definitions, modern and historical"
-optional = false
-python-versions = "*"
-files = [
- {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"},
- {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"},
-]
-
-[[package]]
-name = "pyusb"
-version = "1.2.1"
-description = "Python USB access module"
-optional = true
-python-versions = ">=3.6.0"
-files = [
- {file = "pyusb-1.2.1-py3-none-any.whl", hash = "sha256:2b4c7cb86dbadf044dfb9d3a4ff69fd217013dbe78a792177a3feb172449ea36"},
- {file = "pyusb-1.2.1.tar.gz", hash = "sha256:a4cc7404a203144754164b8b40994e2849fde1cfff06b08492f12fff9d9de7b9"},
-]
-
-[[package]]
-name = "pywinpty"
-version = "2.0.13"
-description = "Pseudo terminal support for Windows from Python."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "pywinpty-2.0.13-cp310-none-win_amd64.whl", hash = "sha256:697bff211fb5a6508fee2dc6ff174ce03f34a9a233df9d8b5fe9c8ce4d5eaf56"},
- {file = "pywinpty-2.0.13-cp311-none-win_amd64.whl", hash = "sha256:b96fb14698db1284db84ca38c79f15b4cfdc3172065b5137383910567591fa99"},
- {file = "pywinpty-2.0.13-cp312-none-win_amd64.whl", hash = "sha256:2fd876b82ca750bb1333236ce98488c1be96b08f4f7647cfdf4129dfad83c2d4"},
- {file = "pywinpty-2.0.13-cp38-none-win_amd64.whl", hash = "sha256:61d420c2116c0212808d31625611b51caf621fe67f8a6377e2e8b617ea1c1f7d"},
- {file = "pywinpty-2.0.13-cp39-none-win_amd64.whl", hash = "sha256:71cb613a9ee24174730ac7ae439fd179ca34ccb8c5349e8d7b72ab5dea2c6f4b"},
- {file = "pywinpty-2.0.13.tar.gz", hash = "sha256:c34e32351a3313ddd0d7da23d27f835c860d32fe4ac814d372a3ea9594f41dde"},
-]
-
-[[package]]
-name = "pyyaml"
-version = "6.0.2"
-description = "YAML parser and emitter for Python"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
- {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
- {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
- {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
- {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
- {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
- {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
- {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
- {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
- {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
- {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
- {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
- {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
- {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
- {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
- {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
- {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
- {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
- {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
- {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
- {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
- {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
- {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
- {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
- {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
- {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
- {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
- {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
- {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
- {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
- {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
- {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
- {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
- {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
- {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
- {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
- {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
- {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
- {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
- {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
- {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
- {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
- {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
- {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
- {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
- {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
- {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
- {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
- {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
- {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
- {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
- {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
- {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
-]
-
-[[package]]
-name = "pyzmq"
-version = "26.2.0"
-description = "Python bindings for 0MQ"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629"},
- {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b"},
- {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764"},
- {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c"},
- {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a"},
- {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88"},
- {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f"},
- {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282"},
- {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea"},
- {file = "pyzmq-26.2.0-cp310-cp310-win32.whl", hash = "sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2"},
- {file = "pyzmq-26.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971"},
- {file = "pyzmq-26.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa"},
- {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218"},
- {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4"},
- {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef"},
- {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317"},
- {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf"},
- {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e"},
- {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37"},
- {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3"},
- {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6"},
- {file = "pyzmq-26.2.0-cp311-cp311-win32.whl", hash = "sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4"},
- {file = "pyzmq-26.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5"},
- {file = "pyzmq-26.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003"},
- {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9"},
- {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52"},
- {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08"},
- {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5"},
- {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae"},
- {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711"},
- {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6"},
- {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3"},
- {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b"},
- {file = "pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7"},
- {file = "pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a"},
- {file = "pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b"},
- {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726"},
- {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3"},
- {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50"},
- {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb"},
- {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187"},
- {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b"},
- {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18"},
- {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115"},
- {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e"},
- {file = "pyzmq-26.2.0-cp313-cp313-win32.whl", hash = "sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5"},
- {file = "pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad"},
- {file = "pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797"},
- {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a"},
- {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc"},
- {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5"},
- {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672"},
- {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797"},
- {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386"},
- {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306"},
- {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6"},
- {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0"},
- {file = "pyzmq-26.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8"},
- {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8"},
- {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1"},
- {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9"},
- {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27"},
- {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097"},
- {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93"},
- {file = "pyzmq-26.2.0-cp37-cp37m-win32.whl", hash = "sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951"},
- {file = "pyzmq-26.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231"},
- {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f"},
- {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2"},
- {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6"},
- {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289"},
- {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732"},
- {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780"},
- {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640"},
- {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd"},
- {file = "pyzmq-26.2.0-cp38-cp38-win32.whl", hash = "sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988"},
- {file = "pyzmq-26.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f"},
- {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2"},
- {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c"},
- {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98"},
- {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9"},
- {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db"},
- {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073"},
- {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc"},
- {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940"},
- {file = "pyzmq-26.2.0-cp39-cp39-win32.whl", hash = "sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44"},
- {file = "pyzmq-26.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec"},
- {file = "pyzmq-26.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb"},
- {file = "pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072"},
- {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1"},
- {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d"},
- {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca"},
- {file = "pyzmq-26.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c"},
- {file = "pyzmq-26.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c"},
- {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6"},
- {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c"},
- {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6"},
- {file = "pyzmq-26.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed"},
- {file = "pyzmq-26.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20"},
- {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919"},
- {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5"},
- {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc"},
- {file = "pyzmq-26.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277"},
- {file = "pyzmq-26.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3"},
- {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a"},
- {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6"},
- {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a"},
- {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4"},
- {file = "pyzmq-26.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f"},
- {file = "pyzmq-26.2.0.tar.gz", hash = "sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f"},
-]
-
-[package.dependencies]
-cffi = {version = "*", markers = "implementation_name == \"pypy\""}
-
-[[package]]
-name = "referencing"
-version = "0.35.1"
-description = "JSON Referencing + Python"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"},
- {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"},
-]
-
-[package.dependencies]
-attrs = ">=22.2.0"
-rpds-py = ">=0.7.0"
-
-[[package]]
-name = "regex"
-version = "2024.9.11"
-description = "Alternative regular expression module, to replace re."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"},
- {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"},
- {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"},
- {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"},
- {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"},
- {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"},
- {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"},
- {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"},
- {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"},
- {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"},
- {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"},
- {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"},
- {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"},
- {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"},
- {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"},
- {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"},
- {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"},
- {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"},
- {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"},
- {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"},
- {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"},
- {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"},
- {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"},
- {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"},
- {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"},
- {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"},
- {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"},
- {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"},
- {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"},
- {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"},
- {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"},
-]
-
-[[package]]
-name = "renamed-opencv-python-inference-engine"
-version = "2022.1.5"
-description = "Wrapper package for OpenCV with Inference Engine python bindings, but compiled under another namespace to prevent conflicts with the default OpenCV python packages"
-optional = true
-python-versions = "*"
-files = [
- {file = "renamed_opencv_python_inference_engine-2022.1.5-py3-none-manylinux1_x86_64.whl", hash = "sha256:c92666acfd75f8b29b9f1aa566d4ad3851387fcea3992f113f72adf449477523"},
-]
-
-[package.dependencies]
-numpy = "*"
-
-[[package]]
-name = "requests"
-version = "2.32.3"
-description = "Python HTTP for Humans."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
- {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
-]
-
-[package.dependencies]
-certifi = ">=2017.4.17"
-charset-normalizer = ">=2,<4"
-idna = ">=2.5,<4"
-PySocks = {version = ">=1.5.6,<1.5.7 || >1.5.7", optional = true, markers = "extra == \"socks\""}
-urllib3 = ">=1.21.1,<3"
-
-[package.extras]
-socks = ["PySocks (>=1.5.6,!=1.5.7)"]
-use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
-
-[[package]]
-name = "rerun-sdk"
-version = "0.21.0"
-description = "The Rerun Logging SDK"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "rerun_sdk-0.21.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1e454ceea31c70ae9ec1bb26eaa82828661b7657ab4d2261ca0b94006d6a1975"},
- {file = "rerun_sdk-0.21.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:84ecb77b0b5bac71b53e849801ff073de89fcd2f1e0ca0da62fb18fcbeceadf0"},
- {file = "rerun_sdk-0.21.0-cp38-abi3-manylinux_2_31_aarch64.whl", hash = "sha256:919d921165c3238490dbe5bf00a062c68fdd2c54dc14aac6a1914c82edb5d9c8"},
- {file = "rerun_sdk-0.21.0-cp38-abi3-manylinux_2_31_x86_64.whl", hash = "sha256:897649aadcab7014b78096f93c84c61c00a227b80adaf0dec279924b5aab53d8"},
- {file = "rerun_sdk-0.21.0-cp38-abi3-win_amd64.whl", hash = "sha256:2060bdb536a198f0f04789ba5ba771e66587e7851d668b3dfab257a5efa16819"},
-]
-
-[package.dependencies]
-attrs = ">=23.1.0"
-numpy = ">=1.23"
-pillow = ">=8.0.0"
-pyarrow = ">=14.0.2"
-typing-extensions = ">=4.5"
-
-[package.extras]
-notebook = ["rerun-notebook (==0.21.0)"]
-tests = ["pytest (==7.1.2)"]
-
-[[package]]
-name = "rfc3339-validator"
-version = "0.1.4"
-description = "A pure python RFC3339 validator"
-optional = true
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-files = [
- {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"},
- {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"},
-]
-
-[package.dependencies]
-six = "*"
-
-[[package]]
-name = "rfc3986-validator"
-version = "0.1.1"
-description = "Pure python rfc3986 validator"
-optional = true
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-files = [
- {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"},
- {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"},
-]
-
-[[package]]
-name = "rich"
-version = "13.9.2"
-description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
-optional = true
-python-versions = ">=3.8.0"
-files = [
- {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"},
- {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"},
-]
-
-[package.dependencies]
-markdown-it-py = ">=2.2.0"
-pygments = ">=2.13.0,<3.0.0"
-typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""}
-
-[package.extras]
-jupyter = ["ipywidgets (>=7.5.1,<9)"]
-
-[[package]]
-name = "rpds-py"
-version = "0.20.0"
-description = "Python bindings to Rust's persistent data structures (rpds)"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"},
- {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"},
- {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"},
- {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"},
- {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"},
- {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = "sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"},
- {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"},
- {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"},
- {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"},
- {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"},
- {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"},
- {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"},
- {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"},
- {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"},
- {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"},
- {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"},
- {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"},
- {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"},
- {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"},
- {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"},
- {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"},
- {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"},
- {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"},
- {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"},
- {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"},
- {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"},
- {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"},
- {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"},
- {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"},
- {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"},
- {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"},
- {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"},
- {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"},
- {file = "rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"},
- {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"},
- {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"},
- {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"},
- {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"},
- {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"},
- {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"},
- {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"},
- {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"},
- {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"},
-]
-
-[[package]]
-name = "rplidar-roboticia"
-version = "0.9.5"
-description = "Simple and lightweight module for working with RPLidar laser scanners"
-optional = true
-python-versions = "*"
-files = [
- {file = "rplidar-roboticia-0.9.5.tar.gz", hash = "sha256:709e9143f7701d69e8439231b065e676f7d5a6086cd2922113b055bedf99f0e3"},
-]
-
-[package.dependencies]
-pyserial = "*"
-
-[[package]]
-name = "safetensors"
-version = "0.4.5"
-description = ""
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "safetensors-0.4.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a63eaccd22243c67e4f2b1c3e258b257effc4acd78f3b9d397edc8cf8f1298a7"},
- {file = "safetensors-0.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:23fc9b4ec7b602915cbb4ec1a7c1ad96d2743c322f20ab709e2c35d1b66dad27"},
- {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6885016f34bef80ea1085b7e99b3c1f92cb1be78a49839203060f67b40aee761"},
- {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:133620f443450429322f238fda74d512c4008621227fccf2f8cf4a76206fea7c"},
- {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4fb3e0609ec12d2a77e882f07cced530b8262027f64b75d399f1504ffec0ba56"},
- {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0f1dd769f064adc33831f5e97ad07babbd728427f98e3e1db6902e369122737"},
- {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6d156bdb26732feada84f9388a9f135528c1ef5b05fae153da365ad4319c4c5"},
- {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e347d77e2c77eb7624400ccd09bed69d35c0332f417ce8c048d404a096c593b"},
- {file = "safetensors-0.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9f556eea3aec1d3d955403159fe2123ddd68e880f83954ee9b4a3f2e15e716b6"},
- {file = "safetensors-0.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9483f42be3b6bc8ff77dd67302de8ae411c4db39f7224dec66b0eb95822e4163"},
- {file = "safetensors-0.4.5-cp310-none-win32.whl", hash = "sha256:7389129c03fadd1ccc37fd1ebbc773f2b031483b04700923c3511d2a939252cc"},
- {file = "safetensors-0.4.5-cp310-none-win_amd64.whl", hash = "sha256:e98ef5524f8b6620c8cdef97220c0b6a5c1cef69852fcd2f174bb96c2bb316b1"},
- {file = "safetensors-0.4.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:21f848d7aebd5954f92538552d6d75f7c1b4500f51664078b5b49720d180e47c"},
- {file = "safetensors-0.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb07000b19d41e35eecef9a454f31a8b4718a185293f0d0b1c4b61d6e4487971"},
- {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09dedf7c2fda934ee68143202acff6e9e8eb0ddeeb4cfc24182bef999efa9f42"},
- {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:59b77e4b7a708988d84f26de3ebead61ef1659c73dcbc9946c18f3b1786d2688"},
- {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d3bc83e14d67adc2e9387e511097f254bd1b43c3020440e708858c684cbac68"},
- {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39371fc551c1072976073ab258c3119395294cf49cdc1f8476794627de3130df"},
- {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6c19feda32b931cae0acd42748a670bdf56bee6476a046af20181ad3fee4090"},
- {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a659467495de201e2f282063808a41170448c78bada1e62707b07a27b05e6943"},
- {file = "safetensors-0.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bad5e4b2476949bcd638a89f71b6916fa9a5cae5c1ae7eede337aca2100435c0"},
- {file = "safetensors-0.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a3a315a6d0054bc6889a17f5668a73f94f7fe55121ff59e0a199e3519c08565f"},
- {file = "safetensors-0.4.5-cp311-none-win32.whl", hash = "sha256:a01e232e6d3d5cf8b1667bc3b657a77bdab73f0743c26c1d3c5dd7ce86bd3a92"},
- {file = "safetensors-0.4.5-cp311-none-win_amd64.whl", hash = "sha256:cbd39cae1ad3e3ef6f63a6f07296b080c951f24cec60188378e43d3713000c04"},
- {file = "safetensors-0.4.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:473300314e026bd1043cef391bb16a8689453363381561b8a3e443870937cc1e"},
- {file = "safetensors-0.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:801183a0f76dc647f51a2d9141ad341f9665602a7899a693207a82fb102cc53e"},
- {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1524b54246e422ad6fb6aea1ac71edeeb77666efa67230e1faf6999df9b2e27f"},
- {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3139098e3e8b2ad7afbca96d30ad29157b50c90861084e69fcb80dec7430461"},
- {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65573dc35be9059770808e276b017256fa30058802c29e1038eb1c00028502ea"},
- {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd33da8e9407559f8779c82a0448e2133737f922d71f884da27184549416bfed"},
- {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3685ce7ed036f916316b567152482b7e959dc754fcc4a8342333d222e05f407c"},
- {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dde2bf390d25f67908278d6f5d59e46211ef98e44108727084d4637ee70ab4f1"},
- {file = "safetensors-0.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7469d70d3de970b1698d47c11ebbf296a308702cbaae7fcb993944751cf985f4"},
- {file = "safetensors-0.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a6ba28118636a130ccbb968bc33d4684c48678695dba2590169d5ab03a45646"},
- {file = "safetensors-0.4.5-cp312-none-win32.whl", hash = "sha256:c859c7ed90b0047f58ee27751c8e56951452ed36a67afee1b0a87847d065eec6"},
- {file = "safetensors-0.4.5-cp312-none-win_amd64.whl", hash = "sha256:b5a8810ad6a6f933fff6c276eae92c1da217b39b4d8b1bc1c0b8af2d270dc532"},
- {file = "safetensors-0.4.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:25e5f8e2e92a74f05b4ca55686234c32aac19927903792b30ee6d7bd5653d54e"},
- {file = "safetensors-0.4.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:81efb124b58af39fcd684254c645e35692fea81c51627259cdf6d67ff4458916"},
- {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:585f1703a518b437f5103aa9cf70e9bd437cb78eea9c51024329e4fb8a3e3679"},
- {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b99fbf72e3faf0b2f5f16e5e3458b93b7d0a83984fe8d5364c60aa169f2da89"},
- {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b17b299ca9966ca983ecda1c0791a3f07f9ca6ab5ded8ef3d283fff45f6bcd5f"},
- {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76ded72f69209c9780fdb23ea89e56d35c54ae6abcdec67ccb22af8e696e449a"},
- {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2783956926303dcfeb1de91a4d1204cd4089ab441e622e7caee0642281109db3"},
- {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d94581aab8c6b204def4d7320f07534d6ee34cd4855688004a4354e63b639a35"},
- {file = "safetensors-0.4.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:67e1e7cb8678bb1b37ac48ec0df04faf689e2f4e9e81e566b5c63d9f23748523"},
- {file = "safetensors-0.4.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:dbd280b07e6054ea68b0cb4b16ad9703e7d63cd6890f577cb98acc5354780142"},
- {file = "safetensors-0.4.5-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:77d9b228da8374c7262046a36c1f656ba32a93df6cc51cd4453af932011e77f1"},
- {file = "safetensors-0.4.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:500cac01d50b301ab7bb192353317035011c5ceeef0fca652f9f43c000bb7f8d"},
- {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75331c0c746f03158ded32465b7d0b0e24c5a22121743662a2393439c43a45cf"},
- {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670e95fe34e0d591d0529e5e59fd9d3d72bc77b1444fcaa14dccda4f36b5a38b"},
- {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:098923e2574ff237c517d6e840acada8e5b311cb1fa226019105ed82e9c3b62f"},
- {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13ca0902d2648775089fa6a0c8fc9e6390c5f8ee576517d33f9261656f851e3f"},
- {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f0032bedc869c56f8d26259fe39cd21c5199cd57f2228d817a0e23e8370af25"},
- {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4b15f51b4f8f2a512341d9ce3475cacc19c5fdfc5db1f0e19449e75f95c7dc8"},
- {file = "safetensors-0.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f6594d130d0ad933d885c6a7b75c5183cb0e8450f799b80a39eae2b8508955eb"},
- {file = "safetensors-0.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:60c828a27e852ded2c85fc0f87bf1ec20e464c5cd4d56ff0e0711855cc2e17f8"},
- {file = "safetensors-0.4.5-cp37-none-win32.whl", hash = "sha256:6d3de65718b86c3eeaa8b73a9c3d123f9307a96bbd7be9698e21e76a56443af5"},
- {file = "safetensors-0.4.5-cp37-none-win_amd64.whl", hash = "sha256:5a2d68a523a4cefd791156a4174189a4114cf0bf9c50ceb89f261600f3b2b81a"},
- {file = "safetensors-0.4.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:e7a97058f96340850da0601a3309f3d29d6191b0702b2da201e54c6e3e44ccf0"},
- {file = "safetensors-0.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:63bfd425e25f5c733f572e2246e08a1c38bd6f2e027d3f7c87e2e43f228d1345"},
- {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3664ac565d0e809b0b929dae7ccd74e4d3273cd0c6d1220c6430035befb678e"},
- {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:313514b0b9b73ff4ddfb4edd71860696dbe3c1c9dc4d5cc13dbd74da283d2cbf"},
- {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31fa33ee326f750a2f2134a6174773c281d9a266ccd000bd4686d8021f1f3dac"},
- {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09566792588d77b68abe53754c9f1308fadd35c9f87be939e22c623eaacbed6b"},
- {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309aaec9b66cbf07ad3a2e5cb8a03205663324fea024ba391594423d0f00d9fe"},
- {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53946c5813b8f9e26103c5efff4a931cc45d874f45229edd68557ffb35ffb9f8"},
- {file = "safetensors-0.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:868f9df9e99ad1e7f38c52194063a982bc88fedc7d05096f4f8160403aaf4bd6"},
- {file = "safetensors-0.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9cc9449bd0b0bc538bd5e268221f0c5590bc5c14c1934a6ae359d44410dc68c4"},
- {file = "safetensors-0.4.5-cp38-none-win32.whl", hash = "sha256:83c4f13a9e687335c3928f615cd63a37e3f8ef072a3f2a0599fa09f863fb06a2"},
- {file = "safetensors-0.4.5-cp38-none-win_amd64.whl", hash = "sha256:b98d40a2ffa560653f6274e15b27b3544e8e3713a44627ce268f419f35c49478"},
- {file = "safetensors-0.4.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cf727bb1281d66699bef5683b04d98c894a2803442c490a8d45cd365abfbdeb2"},
- {file = "safetensors-0.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96f1d038c827cdc552d97e71f522e1049fef0542be575421f7684756a748e457"},
- {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:139fbee92570ecea774e6344fee908907db79646d00b12c535f66bc78bd5ea2c"},
- {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c36302c1c69eebb383775a89645a32b9d266878fab619819ce660309d6176c9b"},
- {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d641f5b8149ea98deb5ffcf604d764aad1de38a8285f86771ce1abf8e74c4891"},
- {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4db6a61d968de73722b858038c616a1bebd4a86abe2688e46ca0cc2d17558f2"},
- {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b75a616e02f21b6f1d5785b20cecbab5e2bd3f6358a90e8925b813d557666ec1"},
- {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:788ee7d04cc0e0e7f944c52ff05f52a4415b312f5efd2ee66389fb7685ee030c"},
- {file = "safetensors-0.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:87bc42bd04fd9ca31396d3ca0433db0be1411b6b53ac5a32b7845a85d01ffc2e"},
- {file = "safetensors-0.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4037676c86365a721a8c9510323a51861d703b399b78a6b4486a54a65a975fca"},
- {file = "safetensors-0.4.5-cp39-none-win32.whl", hash = "sha256:1500418454529d0ed5c1564bda376c4ddff43f30fce9517d9bee7bcce5a8ef50"},
- {file = "safetensors-0.4.5-cp39-none-win_amd64.whl", hash = "sha256:9d1a94b9d793ed8fe35ab6d5cea28d540a46559bafc6aae98f30ee0867000cab"},
- {file = "safetensors-0.4.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdadf66b5a22ceb645d5435a0be7a0292ce59648ca1d46b352f13cff3ea80410"},
- {file = "safetensors-0.4.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d42ffd4c2259f31832cb17ff866c111684c87bd930892a1ba53fed28370c918c"},
- {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd8a1f6d2063a92cd04145c7fd9e31a1c7d85fbec20113a14b487563fdbc0597"},
- {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:951d2fcf1817f4fb0ef0b48f6696688a4e852a95922a042b3f96aaa67eedc920"},
- {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ac85d9a8c1af0e3132371d9f2d134695a06a96993c2e2f0bbe25debb9e3f67a"},
- {file = "safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e3cec4a29eb7fe8da0b1c7988bc3828183080439dd559f720414450de076fcab"},
- {file = "safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:21742b391b859e67b26c0b2ac37f52c9c0944a879a25ad2f9f9f3cd61e7fda8f"},
- {file = "safetensors-0.4.5-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c7db3006a4915151ce1913652e907cdede299b974641a83fbc092102ac41b644"},
- {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f68bf99ea970960a237f416ea394e266e0361895753df06e3e06e6ea7907d98b"},
- {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8158938cf3324172df024da511839d373c40fbfaa83e9abf467174b2910d7b4c"},
- {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:540ce6c4bf6b58cb0fd93fa5f143bc0ee341c93bb4f9287ccd92cf898cc1b0dd"},
- {file = "safetensors-0.4.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bfeaa1a699c6b9ed514bd15e6a91e74738b71125a9292159e3d6b7f0a53d2cde"},
- {file = "safetensors-0.4.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:01c8f00da537af711979e1b42a69a8ec9e1d7112f208e0e9b8a35d2c381085ef"},
- {file = "safetensors-0.4.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a0dd565f83b30f2ca79b5d35748d0d99dd4b3454f80e03dfb41f0038e3bdf180"},
- {file = "safetensors-0.4.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:023b6e5facda76989f4cba95a861b7e656b87e225f61811065d5c501f78cdb3f"},
- {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9633b663393d5796f0b60249549371e392b75a0b955c07e9c6f8708a87fc841f"},
- {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78dd8adfb48716233c45f676d6e48534d34b4bceb50162c13d1f0bdf6f78590a"},
- {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e8deb16c4321d61ae72533b8451ec4a9af8656d1c61ff81aa49f966406e4b68"},
- {file = "safetensors-0.4.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:52452fa5999dc50c4decaf0c53aa28371f7f1e0fe5c2dd9129059fbe1e1599c7"},
- {file = "safetensors-0.4.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d5f23198821e227cfc52d50fa989813513db381255c6d100927b012f0cfec63d"},
- {file = "safetensors-0.4.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f4beb84b6073b1247a773141a6331117e35d07134b3bb0383003f39971d414bb"},
- {file = "safetensors-0.4.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:68814d599d25ed2fdd045ed54d370d1d03cf35e02dce56de44c651f828fb9b7b"},
- {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b6453c54c57c1781292c46593f8a37254b8b99004c68d6c3ce229688931a22"},
- {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adaa9c6dead67e2dd90d634f89131e43162012479d86e25618e821a03d1eb1dc"},
- {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73e7d408e9012cd17511b382b43547850969c7979efc2bc353f317abaf23c84c"},
- {file = "safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:775409ce0fcc58b10773fdb4221ed1eb007de10fe7adbdf8f5e8a56096b6f0bc"},
- {file = "safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:834001bed193e4440c4a3950a31059523ee5090605c907c66808664c932b549c"},
- {file = "safetensors-0.4.5.tar.gz", hash = "sha256:d73de19682deabb02524b3d5d1f8b3aaba94c72f1bbfc7911b9b9d5d391c0310"},
-]
-
-[package.extras]
-all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"]
-dev = ["safetensors[all]"]
-jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"]
-mlx = ["mlx (>=0.0.9)"]
-numpy = ["numpy (>=1.21.6)"]
-paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"]
-pinned-tf = ["safetensors[numpy]", "tensorflow (==2.11.0)"]
-quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"]
-tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"]
-testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"]
-torch = ["safetensors[numpy]", "torch (>=1.10)"]
-
-[[package]]
-name = "scikit-image"
-version = "0.24.0"
-description = "Image processing in Python"
-optional = true
-python-versions = ">=3.9"
-files = [
- {file = "scikit_image-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb3bc0264b6ab30b43c4179ee6156bc18b4861e78bb329dd8d16537b7bbf827a"},
- {file = "scikit_image-0.24.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9c7a52e20cdd760738da38564ba1fed7942b623c0317489af1a598a8dedf088b"},
- {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93f46e6ce42e5409f4d09ce1b0c7f80dd7e4373bcec635b6348b63e3c886eac8"},
- {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39ee0af13435c57351a3397eb379e72164ff85161923eec0c38849fecf1b4764"},
- {file = "scikit_image-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:7ac7913b028b8aa780ffae85922894a69e33d1c0bf270ea1774f382fe8bf95e7"},
- {file = "scikit_image-0.24.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:272909e02a59cea3ed4aa03739bb88df2625daa809f633f40b5053cf09241831"},
- {file = "scikit_image-0.24.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:190ebde80b4470fe8838764b9b15f232a964f1a20391663e31008d76f0c696f7"},
- {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59c98cc695005faf2b79904e4663796c977af22586ddf1b12d6af2fa22842dc2"},
- {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa27b3a0dbad807b966b8db2d78da734cb812ca4787f7fbb143764800ce2fa9c"},
- {file = "scikit_image-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:dacf591ac0c272a111181afad4b788a27fe70d213cfddd631d151cbc34f8ca2c"},
- {file = "scikit_image-0.24.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6fccceb54c9574590abcddc8caf6cefa57c13b5b8b4260ab3ff88ad8f3c252b3"},
- {file = "scikit_image-0.24.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ccc01e4760d655aab7601c1ba7aa4ddd8b46f494ac46ec9c268df6f33ccddf4c"},
- {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18836a18d3a7b6aca5376a2d805f0045826bc6c9fc85331659c33b4813e0b563"},
- {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8579bda9c3f78cb3b3ed8b9425213c53a25fa7e994b7ac01f2440b395babf660"},
- {file = "scikit_image-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:82ab903afa60b2da1da2e6f0c8c65e7c8868c60a869464c41971da929b3e82bc"},
- {file = "scikit_image-0.24.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef04360eda372ee5cd60aebe9be91258639c86ae2ea24093fb9182118008d009"},
- {file = "scikit_image-0.24.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e9aadb442360a7e76f0c5c9d105f79a83d6df0e01e431bd1d5757e2c5871a1f3"},
- {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e37de6f4c1abcf794e13c258dc9b7d385d5be868441de11c180363824192ff7"},
- {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4688c18bd7ec33c08d7bf0fd19549be246d90d5f2c1d795a89986629af0a1e83"},
- {file = "scikit_image-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:56dab751d20b25d5d3985e95c9b4e975f55573554bd76b0aedf5875217c93e69"},
- {file = "scikit_image-0.24.0.tar.gz", hash = "sha256:5d16efe95da8edbeb363e0c4157b99becbd650a60b77f6e3af5768b66cf007ab"},
-]
-
-[package.dependencies]
-imageio = ">=2.33"
-lazy-loader = ">=0.4"
-networkx = ">=2.8"
-numpy = ">=1.23"
-packaging = ">=21"
-pillow = ">=9.1"
-scipy = ">=1.9"
-tifffile = ">=2022.8.12"
-
-[package.extras]
-build = ["Cython (>=3.0.4)", "build", "meson-python (>=0.15)", "ninja", "numpy (>=2.0.0rc1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.8)", "wheel"]
-data = ["pooch (>=1.6.0)"]
-developer = ["ipython", "pre-commit", "tomli"]
-docs = ["PyWavelets (>=1.1.1)", "dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.6)", "myst-parser", "numpydoc (>=1.7)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.15.2)", "pytest-doctestplus", "pytest-runner", "scikit-learn (>=1.1)", "seaborn (>=0.11)", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-gallery (>=0.14)", "sphinx_design (>=0.5)", "tifffile (>=2022.8.12)"]
-optional = ["PyWavelets (>=1.1.1)", "SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.6)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=1.1)"]
-test = ["asv", "numpydoc (>=1.7)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-doctestplus", "pytest-faulthandler", "pytest-localserver"]
-
-[[package]]
-name = "scipy"
-version = "1.14.1"
-description = "Fundamental algorithms for scientific computing in Python"
-optional = true
-python-versions = ">=3.10"
-files = [
- {file = "scipy-1.14.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:b28d2ca4add7ac16ae8bb6632a3c86e4b9e4d52d3e34267f6e1b0c1f8d87e389"},
- {file = "scipy-1.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d0d2821003174de06b69e58cef2316a6622b60ee613121199cb2852a873f8cf3"},
- {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8bddf15838ba768bb5f5083c1ea012d64c9a444e16192762bd858f1e126196d0"},
- {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:97c5dddd5932bd2a1a31c927ba5e1463a53b87ca96b5c9bdf5dfd6096e27efc3"},
- {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ff0a7e01e422c15739ecd64432743cf7aae2b03f3084288f399affcefe5222d"},
- {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69"},
- {file = "scipy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8426251ad1e4ad903a4514712d2fa8fdd5382c978010d1c6f5f37ef286a713ad"},
- {file = "scipy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:a49f6ed96f83966f576b33a44257d869756df6cf1ef4934f59dd58b25e0327e5"},
- {file = "scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:2da0469a4ef0ecd3693761acbdc20f2fdeafb69e6819cc081308cc978153c675"},
- {file = "scipy-1.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c0ee987efa6737242745f347835da2cc5bb9f1b42996a4d97d5c7ff7928cb6f2"},
- {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3a1b111fac6baec1c1d92f27e76511c9e7218f1695d61b59e05e0fe04dc59617"},
- {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8475230e55549ab3f207bff11ebfc91c805dc3463ef62eda3ccf593254524ce8"},
- {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:278266012eb69f4a720827bdd2dc54b2271c97d84255b2faaa8f161a158c3b37"},
- {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef8c87f8abfb884dac04e97824b61299880c43f4ce675dd2cbeadd3c9b466d2"},
- {file = "scipy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b05d43735bb2f07d689f56f7b474788a13ed8adc484a85aa65c0fd931cf9ccd2"},
- {file = "scipy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:716e389b694c4bb564b4fc0c51bc84d381735e0d39d3f26ec1af2556ec6aad94"},
- {file = "scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d"},
- {file = "scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07"},
- {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5"},
- {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc"},
- {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310"},
- {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066"},
- {file = "scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1"},
- {file = "scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f"},
- {file = "scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79"},
- {file = "scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e"},
- {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73"},
- {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e"},
- {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d"},
- {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e"},
- {file = "scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06"},
- {file = "scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84"},
- {file = "scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417"},
-]
-
-[package.dependencies]
-numpy = ">=1.23.5,<2.3"
-
-[package.extras]
-dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"]
-doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<=7.3.7)", "sphinx-design (>=0.4.0)"]
-test = ["Cython", "array-api-strict (>=2.0)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
-
-[[package]]
-name = "send2trash"
-version = "1.8.3"
-description = "Send file to trash natively under Mac OS X, Windows and Linux"
-optional = true
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
-files = [
- {file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"},
- {file = "Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"},
-]
-
-[package.extras]
-nativelib = ["pyobjc-framework-Cocoa", "pywin32"]
-objc = ["pyobjc-framework-Cocoa"]
-win32 = ["pywin32"]
-
-[[package]]
-name = "sentry-sdk"
-version = "2.16.0"
-description = "Python client for Sentry (https://sentry.io)"
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "sentry_sdk-2.16.0-py2.py3-none-any.whl", hash = "sha256:49139c31ebcd398f4f6396b18910610a0c1602f6e67083240c33019d1f6aa30c"},
- {file = "sentry_sdk-2.16.0.tar.gz", hash = "sha256:90f733b32e15dfc1999e6b7aca67a38688a567329de4d6e184154a73f96c6892"},
-]
-
-[package.dependencies]
-certifi = "*"
-urllib3 = ">=1.26.11"
-
-[package.extras]
-aiohttp = ["aiohttp (>=3.5)"]
-anthropic = ["anthropic (>=0.16)"]
-arq = ["arq (>=0.23)"]
-asyncpg = ["asyncpg (>=0.23)"]
-beam = ["apache-beam (>=2.12)"]
-bottle = ["bottle (>=0.12.13)"]
-celery = ["celery (>=3)"]
-celery-redbeat = ["celery-redbeat (>=2)"]
-chalice = ["chalice (>=1.16.0)"]
-clickhouse-driver = ["clickhouse-driver (>=0.2.0)"]
-django = ["django (>=1.8)"]
-falcon = ["falcon (>=1.4)"]
-fastapi = ["fastapi (>=0.79.0)"]
-flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
-grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
-http2 = ["httpcore[http2] (==1.*)"]
-httpx = ["httpx (>=0.16.0)"]
-huey = ["huey (>=2)"]
-huggingface-hub = ["huggingface-hub (>=0.22)"]
-langchain = ["langchain (>=0.0.210)"]
-litestar = ["litestar (>=2.0.0)"]
-loguru = ["loguru (>=0.5)"]
-openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
-opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
-opentelemetry-experimental = ["opentelemetry-distro"]
-pure-eval = ["asttokens", "executing", "pure-eval"]
-pymongo = ["pymongo (>=3.1)"]
-pyspark = ["pyspark (>=2.4.4)"]
-quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
-rq = ["rq (>=0.6)"]
-sanic = ["sanic (>=0.8)"]
-sqlalchemy = ["sqlalchemy (>=1.2)"]
-starlette = ["starlette (>=0.19.1)"]
-starlite = ["starlite (>=1.48)"]
-tornado = ["tornado (>=6)"]
-
-[[package]]
-name = "setproctitle"
-version = "1.3.3"
-description = "A Python module to customize the process title"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:897a73208da48db41e687225f355ce993167079eda1260ba5e13c4e53be7f754"},
- {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c331e91a14ba4076f88c29c777ad6b58639530ed5b24b5564b5ed2fd7a95452"},
- {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbbd6c7de0771c84b4aa30e70b409565eb1fc13627a723ca6be774ed6b9d9fa3"},
- {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c05ac48ef16ee013b8a326c63e4610e2430dbec037ec5c5b58fcced550382b74"},
- {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1342f4fdb37f89d3e3c1c0a59d6ddbedbde838fff5c51178a7982993d238fe4f"},
- {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc74e84fdfa96821580fb5e9c0b0777c1c4779434ce16d3d62a9c4d8c710df39"},
- {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9617b676b95adb412bb69645d5b077d664b6882bb0d37bfdafbbb1b999568d85"},
- {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6a249415f5bb88b5e9e8c4db47f609e0bf0e20a75e8d744ea787f3092ba1f2d0"},
- {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:38da436a0aaace9add67b999eb6abe4b84397edf4a78ec28f264e5b4c9d53cd5"},
- {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:da0d57edd4c95bf221b2ebbaa061e65b1788f1544977288bdf95831b6e44e44d"},
- {file = "setproctitle-1.3.3-cp310-cp310-win32.whl", hash = "sha256:a1fcac43918b836ace25f69b1dca8c9395253ad8152b625064415b1d2f9be4fb"},
- {file = "setproctitle-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:200620c3b15388d7f3f97e0ae26599c0c378fdf07ae9ac5a13616e933cbd2086"},
- {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:334f7ed39895d692f753a443102dd5fed180c571eb6a48b2a5b7f5b3564908c8"},
- {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:950f6476d56ff7817a8fed4ab207727fc5260af83481b2a4b125f32844df513a"},
- {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:195c961f54a09eb2acabbfc90c413955cf16c6e2f8caa2adbf2237d1019c7dd8"},
- {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f05e66746bf9fe6a3397ec246fe481096664a9c97eb3fea6004735a4daf867fd"},
- {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b5901a31012a40ec913265b64e48c2a4059278d9f4e6be628441482dd13fb8b5"},
- {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64286f8a995f2cd934082b398fc63fca7d5ffe31f0e27e75b3ca6b4efda4e353"},
- {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:184239903bbc6b813b1a8fc86394dc6ca7d20e2ebe6f69f716bec301e4b0199d"},
- {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:664698ae0013f986118064b6676d7dcd28fefd0d7d5a5ae9497cbc10cba48fa5"},
- {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e5119a211c2e98ff18b9908ba62a3bd0e3fabb02a29277a7232a6fb4b2560aa0"},
- {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:417de6b2e214e837827067048f61841f5d7fc27926f2e43954567094051aff18"},
- {file = "setproctitle-1.3.3-cp311-cp311-win32.whl", hash = "sha256:6a143b31d758296dc2f440175f6c8e0b5301ced3b0f477b84ca43cdcf7f2f476"},
- {file = "setproctitle-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a680d62c399fa4b44899094027ec9a1bdaf6f31c650e44183b50d4c4d0ccc085"},
- {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d4460795a8a7a391e3567b902ec5bdf6c60a47d791c3b1d27080fc203d11c9dc"},
- {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bdfd7254745bb737ca1384dee57e6523651892f0ea2a7344490e9caefcc35e64"},
- {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:477d3da48e216d7fc04bddab67b0dcde633e19f484a146fd2a34bb0e9dbb4a1e"},
- {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ab2900d111e93aff5df9fddc64cf51ca4ef2c9f98702ce26524f1acc5a786ae7"},
- {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:088b9efc62d5aa5d6edf6cba1cf0c81f4488b5ce1c0342a8b67ae39d64001120"},
- {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6d50252377db62d6a0bb82cc898089916457f2db2041e1d03ce7fadd4a07381"},
- {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:87e668f9561fd3a457ba189edfc9e37709261287b52293c115ae3487a24b92f6"},
- {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:287490eb90e7a0ddd22e74c89a92cc922389daa95babc833c08cf80c84c4df0a"},
- {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:4fe1c49486109f72d502f8be569972e27f385fe632bd8895f4730df3c87d5ac8"},
- {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4a6ba2494a6449b1f477bd3e67935c2b7b0274f2f6dcd0f7c6aceae10c6c6ba3"},
- {file = "setproctitle-1.3.3-cp312-cp312-win32.whl", hash = "sha256:2df2b67e4b1d7498632e18c56722851ba4db5d6a0c91aaf0fd395111e51cdcf4"},
- {file = "setproctitle-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:f38d48abc121263f3b62943f84cbaede05749047e428409c2c199664feb6abc7"},
- {file = "setproctitle-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:816330675e3504ae4d9a2185c46b573105d2310c20b19ea2b4596a9460a4f674"},
- {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f960bc22d8d8e4ac886d1e2e21ccbd283adcf3c43136161c1ba0fa509088e0"},
- {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e6e7adff74796ef12753ff399491b8827f84f6c77659d71bd0b35870a17d8f"},
- {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53bc0d2358507596c22b02db079618451f3bd720755d88e3cccd840bafb4c41c"},
- {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6d20f9541f5f6ac63df553b6d7a04f313947f550eab6a61aa758b45f0d5657"},
- {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c1c84beab776b0becaa368254801e57692ed749d935469ac10e2b9b825dbdd8e"},
- {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:507e8dc2891021350eaea40a44ddd887c9f006e6b599af8d64a505c0f718f170"},
- {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b1067647ac7aba0b44b591936118a22847bda3c507b0a42d74272256a7a798e9"},
- {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2e71f6365744bf53714e8bd2522b3c9c1d83f52ffa6324bd7cbb4da707312cd8"},
- {file = "setproctitle-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:7f1d36a1e15a46e8ede4e953abb104fdbc0845a266ec0e99cc0492a4364f8c44"},
- {file = "setproctitle-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9a402881ec269d0cc9c354b149fc29f9ec1a1939a777f1c858cdb09c7a261df"},
- {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ff814dea1e5c492a4980e3e7d094286077054e7ea116cbeda138819db194b2cd"},
- {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:accb66d7b3ccb00d5cd11d8c6e07055a4568a24c95cf86109894dcc0c134cc89"},
- {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554eae5a5b28f02705b83a230e9d163d645c9a08914c0ad921df363a07cf39b1"},
- {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a911b26264dbe9e8066c7531c0591cfab27b464459c74385b276fe487ca91c12"},
- {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2982efe7640c4835f7355fdb4da313ad37fb3b40f5c69069912f8048f77b28c8"},
- {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df3f4274b80709d8bcab2f9a862973d453b308b97a0b423a501bcd93582852e3"},
- {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:af2c67ae4c795d1674a8d3ac1988676fa306bcfa1e23fddb5e0bd5f5635309ca"},
- {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:af4061f67fd7ec01624c5e3c21f6b7af2ef0e6bab7fbb43f209e6506c9ce0092"},
- {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:37a62cbe16d4c6294e84670b59cf7adcc73faafe6af07f8cb9adaf1f0e775b19"},
- {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a83ca086fbb017f0d87f240a8f9bbcf0809f3b754ee01cec928fff926542c450"},
- {file = "setproctitle-1.3.3-cp38-cp38-win32.whl", hash = "sha256:059f4ce86f8cc92e5860abfc43a1dceb21137b26a02373618d88f6b4b86ba9b2"},
- {file = "setproctitle-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:ab92e51cd4a218208efee4c6d37db7368fdf182f6e7ff148fb295ecddf264287"},
- {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c7951820b77abe03d88b114b998867c0f99da03859e5ab2623d94690848d3e45"},
- {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5bc94cf128676e8fac6503b37763adb378e2b6be1249d207630f83fc325d9b11"},
- {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f5d9027eeda64d353cf21a3ceb74bb1760bd534526c9214e19f052424b37e42"},
- {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e4a8104db15d3462e29d9946f26bed817a5b1d7a47eabca2d9dc2b995991503"},
- {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c32c41ace41f344d317399efff4cffb133e709cec2ef09c99e7a13e9f3b9483c"},
- {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbf16381c7bf7f963b58fb4daaa65684e10966ee14d26f5cc90f07049bfd8c1e"},
- {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e18b7bd0898398cc97ce2dfc83bb192a13a087ef6b2d5a8a36460311cb09e775"},
- {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69d565d20efe527bd8a9b92e7f299ae5e73b6c0470f3719bd66f3cd821e0d5bd"},
- {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ddedd300cd690a3b06e7eac90ed4452348b1348635777ce23d460d913b5b63c3"},
- {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:415bfcfd01d1fbf5cbd75004599ef167a533395955305f42220a585f64036081"},
- {file = "setproctitle-1.3.3-cp39-cp39-win32.whl", hash = "sha256:21112fcd2195d48f25760f0eafa7a76510871bbb3b750219310cf88b04456ae3"},
- {file = "setproctitle-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:5a740f05d0968a5a17da3d676ce6afefebeeeb5ce137510901bf6306ba8ee002"},
- {file = "setproctitle-1.3.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6b9e62ddb3db4b5205c0321dd69a406d8af9ee1693529d144e86bd43bcb4b6c0"},
- {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e3b99b338598de0bd6b2643bf8c343cf5ff70db3627af3ca427a5e1a1a90dd9"},
- {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ae9a02766dad331deb06855fb7a6ca15daea333b3967e214de12cfae8f0ef5"},
- {file = "setproctitle-1.3.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:200ede6fd11233085ba9b764eb055a2a191fb4ffb950c68675ac53c874c22e20"},
- {file = "setproctitle-1.3.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0d3a953c50776751e80fe755a380a64cb14d61e8762bd43041ab3f8cc436092f"},
- {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5e08e232b78ba3ac6bc0d23ce9e2bee8fad2be391b7e2da834fc9a45129eb87"},
- {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1da82c3e11284da4fcbf54957dafbf0655d2389cd3d54e4eaba636faf6d117a"},
- {file = "setproctitle-1.3.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:aeaa71fb9568ebe9b911ddb490c644fbd2006e8c940f21cb9a1e9425bd709574"},
- {file = "setproctitle-1.3.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:59335d000c6250c35989394661eb6287187854e94ac79ea22315469ee4f4c244"},
- {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3ba57029c9c50ecaf0c92bb127224cc2ea9fda057b5d99d3f348c9ec2855ad3"},
- {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d876d355c53d975c2ef9c4f2487c8f83dad6aeaaee1b6571453cb0ee992f55f6"},
- {file = "setproctitle-1.3.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:224602f0939e6fb9d5dd881be1229d485f3257b540f8a900d4271a2c2aa4e5f4"},
- {file = "setproctitle-1.3.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d7f27e0268af2d7503386e0e6be87fb9b6657afd96f5726b733837121146750d"},
- {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5e7266498cd31a4572378c61920af9f6b4676a73c299fce8ba93afd694f8ae7"},
- {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33c5609ad51cd99d388e55651b19148ea99727516132fb44680e1f28dd0d1de9"},
- {file = "setproctitle-1.3.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:eae8988e78192fd1a3245a6f4f382390b61bce6cfcc93f3809726e4c885fa68d"},
- {file = "setproctitle-1.3.3.tar.gz", hash = "sha256:c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae"},
-]
-
-[package.extras]
-test = ["pytest"]
-
-[[package]]
-name = "setuptools"
-version = "75.1.0"
-description = "Easily download, build, install, upgrade, and uninstall Python packages"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"},
- {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"},
-]
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"]
-core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
-enabler = ["pytest-enabler (>=2.2)"]
-test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
-type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"]
-
-[[package]]
-name = "sh"
-version = "2.1.0"
-description = "Python subprocess replacement"
-optional = true
-python-versions = "<4.0,>=3.8.1"
-files = [
- {file = "sh-2.1.0-py3-none-any.whl", hash = "sha256:bf5e44178dd96a542126c2774e9b7ab1d89bfe0e2ef84d92e6d0ed7358d63d01"},
- {file = "sh-2.1.0.tar.gz", hash = "sha256:7e27301c574bec8ca5bf6f211851357526455ee97cd27a7c4c6cc5e2375399cb"},
-]
-
-[[package]]
-name = "shapely"
-version = "2.0.6"
-description = "Manipulation and analysis of geometric objects"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "shapely-2.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29a34e068da2d321e926b5073539fd2a1d4429a2c656bd63f0bd4c8f5b236d0b"},
- {file = "shapely-2.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c84c3f53144febf6af909d6b581bc05e8785d57e27f35ebaa5c1ab9baba13b"},
- {file = "shapely-2.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad2fae12dca8d2b727fa12b007e46fbc522148a584f5d6546c539f3464dccde"},
- {file = "shapely-2.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3304883bd82d44be1b27a9d17f1167fda8c7f5a02a897958d86c59ec69b705e"},
- {file = "shapely-2.0.6-cp310-cp310-win32.whl", hash = "sha256:3ec3a0eab496b5e04633a39fa3d5eb5454628228201fb24903d38174ee34565e"},
- {file = "shapely-2.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:28f87cdf5308a514763a5c38de295544cb27429cfa655d50ed8431a4796090c4"},
- {file = "shapely-2.0.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5aeb0f51a9db176da9a30cb2f4329b6fbd1e26d359012bb0ac3d3c7781667a9e"},
- {file = "shapely-2.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9a7a78b0d51257a367ee115f4d41ca4d46edbd0dd280f697a8092dd3989867b2"},
- {file = "shapely-2.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f32c23d2f43d54029f986479f7c1f6e09c6b3a19353a3833c2ffb226fb63a855"},
- {file = "shapely-2.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3dc9fb0eb56498912025f5eb352b5126f04801ed0e8bdbd867d21bdbfd7cbd0"},
- {file = "shapely-2.0.6-cp311-cp311-win32.whl", hash = "sha256:d93b7e0e71c9f095e09454bf18dad5ea716fb6ced5df3cb044564a00723f339d"},
- {file = "shapely-2.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:c02eb6bf4cfb9fe6568502e85bb2647921ee49171bcd2d4116c7b3109724ef9b"},
- {file = "shapely-2.0.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cec9193519940e9d1b86a3b4f5af9eb6910197d24af02f247afbfb47bcb3fab0"},
- {file = "shapely-2.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83b94a44ab04a90e88be69e7ddcc6f332da7c0a0ebb1156e1c4f568bbec983c3"},
- {file = "shapely-2.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:537c4b2716d22c92036d00b34aac9d3775e3691f80c7aa517c2c290351f42cd8"},
- {file = "shapely-2.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fea108334be345c283ce74bf064fa00cfdd718048a8af7343c59eb40f59726"},
- {file = "shapely-2.0.6-cp312-cp312-win32.whl", hash = "sha256:42fd4cd4834747e4990227e4cbafb02242c0cffe9ce7ef9971f53ac52d80d55f"},
- {file = "shapely-2.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:665990c84aece05efb68a21b3523a6b2057e84a1afbef426ad287f0796ef8a48"},
- {file = "shapely-2.0.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:42805ef90783ce689a4dde2b6b2f261e2c52609226a0438d882e3ced40bb3013"},
- {file = "shapely-2.0.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6d2cb146191a47bd0cee8ff5f90b47547b82b6345c0d02dd8b25b88b68af62d7"},
- {file = "shapely-2.0.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3fdef0a1794a8fe70dc1f514440aa34426cc0ae98d9a1027fb299d45741c381"},
- {file = "shapely-2.0.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c665a0301c645615a107ff7f52adafa2153beab51daf34587170d85e8ba6805"},
- {file = "shapely-2.0.6-cp313-cp313-win32.whl", hash = "sha256:0334bd51828f68cd54b87d80b3e7cee93f249d82ae55a0faf3ea21c9be7b323a"},
- {file = "shapely-2.0.6-cp313-cp313-win_amd64.whl", hash = "sha256:d37d070da9e0e0f0a530a621e17c0b8c3c9d04105655132a87cfff8bd77cc4c2"},
- {file = "shapely-2.0.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fa7468e4f5b92049c0f36d63c3e309f85f2775752e076378e36c6387245c5462"},
- {file = "shapely-2.0.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed5867e598a9e8ac3291da6cc9baa62ca25706eea186117034e8ec0ea4355653"},
- {file = "shapely-2.0.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81d9dfe155f371f78c8d895a7b7f323bb241fb148d848a2bf2244f79213123fe"},
- {file = "shapely-2.0.6-cp37-cp37m-win32.whl", hash = "sha256:fbb7bf02a7542dba55129062570211cfb0defa05386409b3e306c39612e7fbcc"},
- {file = "shapely-2.0.6-cp37-cp37m-win_amd64.whl", hash = "sha256:837d395fac58aa01aa544495b97940995211e3e25f9aaf87bc3ba5b3a8cd1ac7"},
- {file = "shapely-2.0.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c6d88ade96bf02f6bfd667ddd3626913098e243e419a0325ebef2bbd481d1eb6"},
- {file = "shapely-2.0.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8b3b818c4407eaa0b4cb376fd2305e20ff6df757bf1356651589eadc14aab41b"},
- {file = "shapely-2.0.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbc783529a21f2bd50c79cef90761f72d41c45622b3e57acf78d984c50a5d13"},
- {file = "shapely-2.0.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2423f6c0903ebe5df6d32e0066b3d94029aab18425ad4b07bf98c3972a6e25a1"},
- {file = "shapely-2.0.6-cp38-cp38-win32.whl", hash = "sha256:2de00c3bfa80d6750832bde1d9487e302a6dd21d90cb2f210515cefdb616e5f5"},
- {file = "shapely-2.0.6-cp38-cp38-win_amd64.whl", hash = "sha256:3a82d58a1134d5e975f19268710e53bddd9c473743356c90d97ce04b73e101ee"},
- {file = "shapely-2.0.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:392f66f458a0a2c706254f473290418236e52aa4c9b476a072539d63a2460595"},
- {file = "shapely-2.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eba5bae271d523c938274c61658ebc34de6c4b33fdf43ef7e938b5776388c1be"},
- {file = "shapely-2.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7060566bc4888b0c8ed14b5d57df8a0ead5c28f9b69fb6bed4476df31c51b0af"},
- {file = "shapely-2.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b02154b3e9d076a29a8513dffcb80f047a5ea63c897c0cd3d3679f29363cf7e5"},
- {file = "shapely-2.0.6-cp39-cp39-win32.whl", hash = "sha256:44246d30124a4f1a638a7d5419149959532b99dfa25b54393512e6acc9c211ac"},
- {file = "shapely-2.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:2b542d7f1dbb89192d3512c52b679c822ba916f93479fa5d4fc2fe4fa0b3c9e8"},
- {file = "shapely-2.0.6.tar.gz", hash = "sha256:997f6159b1484059ec239cacaa53467fd8b5564dabe186cd84ac2944663b0bf6"},
-]
-
-[package.dependencies]
-numpy = ">=1.14,<3"
-
-[package.extras]
-docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"]
-test = ["pytest", "pytest-cov"]
-
-[[package]]
-name = "six"
-version = "1.16.0"
-description = "Python 2 and 3 compatibility utilities"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
-files = [
- {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
- {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
-]
-
-[[package]]
-name = "smmap"
-version = "5.0.1"
-description = "A pure Python implementation of a sliding window memory map manager"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"},
- {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"},
-]
-
-[[package]]
-name = "snakeviz"
-version = "2.2.0"
-description = "A web-based viewer for Python profiler output"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "snakeviz-2.2.0-py2.py3-none-any.whl", hash = "sha256:569e2d71c47f80a886aa6e70d6405cb6d30aa3520969ad956b06f824c5f02b8e"},
- {file = "snakeviz-2.2.0.tar.gz", hash = "sha256:7bfd00be7ae147eb4a170a471578e1cd3f41f803238958b6b8efcf2c698a6aa9"},
-]
-
-[package.dependencies]
-tornado = ">=2.0"
-
-[[package]]
-name = "sniffio"
-version = "1.3.1"
-description = "Sniff out which async library your code is running under"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
- {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
-]
-
-[[package]]
-name = "soupsieve"
-version = "2.6"
-description = "A modern CSS selector implementation for Beautiful Soup."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"},
- {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"},
-]
-
-[[package]]
-name = "speechrecognition"
-version = "3.10.4"
-description = "Library for performing speech recognition, with support for several engines and APIs, online and offline."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "SpeechRecognition-3.10.4-py2.py3-none-any.whl", hash = "sha256:723b8155692a8ed11a30013f15f89a3e57c5dc8bc73c8cb024bf9bd14c21fba5"},
- {file = "speechrecognition-3.10.4.tar.gz", hash = "sha256:986bafcf61f14625c2f3cea6a471838edd379ed68aeed7b8f3c0fb41e21f1125"},
-]
-
-[package.dependencies]
-requests = ">=2.26.0"
-typing-extensions = "*"
-
-[package.extras]
-dev = ["flake8", "rstcheck"]
-whisper-api = ["openai"]
-whisper-local = ["openai-whisper", "soundfile"]
-
-[[package]]
-name = "spidev"
-version = "3.6"
-description = "Python bindings for Linux SPI access through spidev"
-optional = true
-python-versions = "*"
-files = [
- {file = "spidev-3.6-cp39-cp39-linux_armv7l.whl", hash = "sha256:280abc00a1ef7780ef62c3f294f52a2527b6c47d8c269fea98664970bcaf6da5"},
- {file = "spidev-3.6.tar.gz", hash = "sha256:14dbc37594a4aaef85403ab617985d3c3ef464d62bc9b769ef552db53701115b"},
-]
-
-[[package]]
-name = "stack-data"
-version = "0.6.3"
-description = "Extract data from python stack frames and tracebacks for informative displays"
-optional = true
-python-versions = "*"
-files = [
- {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"},
- {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"},
-]
-
-[package.dependencies]
-asttokens = ">=2.1.0"
-executing = ">=1.2.0"
-pure-eval = "*"
-
-[package.extras]
-tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
-
-[[package]]
-name = "sympy"
-version = "1.13.3"
-description = "Computer algebra system (CAS) in Python"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"},
- {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"},
-]
-
-[package.dependencies]
-mpmath = ">=1.1.0,<1.4"
-
-[package.extras]
-dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"]
-
-[[package]]
-name = "tabulate"
-version = "0.9.0"
-description = "Pretty-print tabular data"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
- {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
-]
-
-[package.extras]
-widechars = ["wcwidth"]
-
-[[package]]
-name = "tenacity"
-version = "9.0.0"
-description = "Retry code until it succeeds"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"},
- {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"},
-]
-
-[package.extras]
-doc = ["reno", "sphinx"]
-test = ["pytest", "tornado (>=4.5)", "typeguard"]
-
-[[package]]
-name = "termcolor"
-version = "2.5.0"
-description = "ANSI color formatting for output in terminal"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "termcolor-2.5.0-py3-none-any.whl", hash = "sha256:37b17b5fc1e604945c2642c872a3764b5d547a48009871aea3edd3afa180afb8"},
- {file = "termcolor-2.5.0.tar.gz", hash = "sha256:998d8d27da6d48442e8e1f016119076b690d962507531df4890fcd2db2ef8a6f"},
-]
-
-[package.extras]
-tests = ["pytest", "pytest-cov"]
-
-[[package]]
-name = "terminado"
-version = "0.18.1"
-description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0"},
- {file = "terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e"},
-]
-
-[package.dependencies]
-ptyprocess = {version = "*", markers = "os_name != \"nt\""}
-pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""}
-tornado = ">=6.1.0"
-
-[package.extras]
-docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
-test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"]
-typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"]
-
-[[package]]
-name = "tifffile"
-version = "2024.9.20"
-description = "Read and write TIFF files"
-optional = true
-python-versions = ">=3.10"
-files = [
- {file = "tifffile-2024.9.20-py3-none-any.whl", hash = "sha256:c54dc85bc1065d972cb8a6ffb3181389d597876aa80177933459733e4ed243dd"},
- {file = "tifffile-2024.9.20.tar.gz", hash = "sha256:3fbf3be2f995a7051a8ae05a4be70c96fc0789f22ed6f1c4104c973cf68a640b"},
-]
-
-[package.dependencies]
-numpy = "*"
-
-[package.extras]
-all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"]
-codecs = ["imagecodecs (>=2023.8.12)"]
-plot = ["matplotlib"]
-test = ["cmapfile", "czifile", "dask", "defusedxml", "fsspec", "imagecodecs", "lfdfiles", "lxml", "ndtiff", "oiffile", "psdtags", "pytest", "roifile", "xarray", "zarr"]
-xml = ["defusedxml", "lxml"]
-zarr = ["fsspec", "zarr"]
-
-[[package]]
-name = "tinycss2"
-version = "1.3.0"
-description = "A tiny CSS parser"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "tinycss2-1.3.0-py3-none-any.whl", hash = "sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"},
- {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"},
-]
-
-[package.dependencies]
-webencodings = ">=0.4"
-
-[package.extras]
-doc = ["sphinx", "sphinx_rtd_theme"]
-test = ["pytest", "ruff"]
-
-[[package]]
-name = "tomli"
-version = "2.0.2"
-description = "A lil' TOML parser"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
- {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
-]
-
-[[package]]
-name = "torch"
-version = "2.4.1"
-description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
-optional = false
-python-versions = ">=3.8.0"
-files = [
- {file = "torch-2.4.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:362f82e23a4cd46341daabb76fba08f04cd646df9bfaf5da50af97cb60ca4971"},
- {file = "torch-2.4.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:e8ac1985c3ff0f60d85b991954cfc2cc25f79c84545aead422763148ed2759e3"},
- {file = "torch-2.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91e326e2ccfb1496e3bee58f70ef605aeb27bd26be07ba64f37dcaac3d070ada"},
- {file = "torch-2.4.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:d36a8ef100f5bff3e9c3cea934b9e0d7ea277cb8210c7152d34a9a6c5830eadd"},
- {file = "torch-2.4.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:0b5f88afdfa05a335d80351e3cea57d38e578c8689f751d35e0ff36bce872113"},
- {file = "torch-2.4.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:ef503165f2341942bfdf2bd520152f19540d0c0e34961232f134dc59ad435be8"},
- {file = "torch-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:092e7c2280c860eff762ac08c4bdcd53d701677851670695e0c22d6d345b269c"},
- {file = "torch-2.4.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:ddddbd8b066e743934a4200b3d54267a46db02106876d21cf31f7da7a96f98ea"},
- {file = "torch-2.4.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:fdc4fe11db3eb93c1115d3e973a27ac7c1a8318af8934ffa36b0370efe28e042"},
- {file = "torch-2.4.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:18835374f599207a9e82c262153c20ddf42ea49bc76b6eadad8e5f49729f6e4d"},
- {file = "torch-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:ebea70ff30544fc021d441ce6b219a88b67524f01170b1c538d7d3ebb5e7f56c"},
- {file = "torch-2.4.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:72b484d5b6cec1a735bf3fa5a1c4883d01748698c5e9cfdbeb4ffab7c7987e0d"},
- {file = "torch-2.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c99e1db4bf0c5347107845d715b4aa1097e601bdc36343d758963055e9599d93"},
- {file = "torch-2.4.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b57f07e92858db78c5b72857b4f0b33a65b00dc5d68e7948a8494b0314efb880"},
- {file = "torch-2.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:f18197f3f7c15cde2115892b64f17c80dbf01ed72b008020e7da339902742cf6"},
- {file = "torch-2.4.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:5fc1d4d7ed265ef853579caf272686d1ed87cebdcd04f2a498f800ffc53dab71"},
- {file = "torch-2.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:40f6d3fe3bae74efcf08cb7f8295eaddd8a838ce89e9d26929d4edd6d5e4329d"},
- {file = "torch-2.4.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:c9299c16c9743001ecef515536ac45900247f4338ecdf70746f2461f9e4831db"},
- {file = "torch-2.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:6bce130f2cd2d52ba4e2c6ada461808de7e5eccbac692525337cfb4c19421846"},
- {file = "torch-2.4.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:a38de2803ee6050309aac032676536c3d3b6a9804248537e38e098d0e14817ec"},
-]
-
-[package.dependencies]
-filelock = "*"
-fsspec = "*"
-jinja2 = "*"
-networkx = "*"
-nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-cudnn-cu12 = {version = "9.1.0.70", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
-setuptools = "*"
-sympy = "*"
-triton = {version = "3.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.13\""}
-typing-extensions = ">=4.8.0"
-
-[package.extras]
-opt-einsum = ["opt-einsum (>=3.3)"]
-optree = ["optree (>=0.11.0)"]
-
-[[package]]
-name = "torchvision"
-version = "0.19.1"
-description = "image and video datasets and models for torch deep learning"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "torchvision-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:54e8513099e6f586356c70f809d34f391af71ad182fe071cc328a28af2c40608"},
- {file = "torchvision-0.19.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:20a1f5e02bfdad7714e55fa3fa698347c11d829fa65e11e5a84df07d93350eed"},
- {file = "torchvision-0.19.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:7b063116164be52fc6deb4762de7f8c90bfa3a65f8d5caf17f8e2d5aadc75a04"},
- {file = "torchvision-0.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:f40b6acabfa886da1bc3768f47679c61feee6bde90deb979d9f300df8c8a0145"},
- {file = "torchvision-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:40514282b4896d62765b8e26d7091c32e17c35817d00ec4be2362ea3ba3d1787"},
- {file = "torchvision-0.19.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:5a91be061ae5d6d5b95e833b93e57ca4d3c56c5a57444dd15da2e3e7fba96050"},
- {file = "torchvision-0.19.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d71a6a6fe3a5281ca3487d4c56ad4aad20ff70f82f1d7c79bcb6e7b0c2af00c8"},
- {file = "torchvision-0.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:70dea324174f5e9981b68e4b7cd524512c106ba64aedef560a86a0bbf2fbf62c"},
- {file = "torchvision-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27ece277ff0f6cdc7fed0627279c632dcb2e58187da771eca24b0fbcf3f8590d"},
- {file = "torchvision-0.19.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:c659ff92a61f188a1a7baef2850f3c0b6c85685447453c03d0e645ba8f1dcc1c"},
- {file = "torchvision-0.19.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:c07bf43c2a145d792ecd9d0503d6c73577147ece508d45600d8aac77e4cdfcf9"},
- {file = "torchvision-0.19.1-cp312-cp312-win_amd64.whl", hash = "sha256:b4283d283675556bb0eae31d29996f53861b17cbdcdf3509e6bc050414ac9289"},
- {file = "torchvision-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4e4f5b24ea6b087b02ed492ab1e21bba3352c4577e2def14248cfc60732338"},
- {file = "torchvision-0.19.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:9281d63ead929bb19143731154cd1d8bf0b5e9873dff8578a40e90a6bec3c6fa"},
- {file = "torchvision-0.19.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:4d10bc9083c4d5fadd7edd7b729700a7be48dab4f62278df3bc73fa48e48a155"},
- {file = "torchvision-0.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:ccf085ef1824fb9e16f1901285bf89c298c62dfd93267a39e8ee42c71255242f"},
- {file = "torchvision-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:731f434d91586769e255b5d70ed1a4457e0a1394a95f4aacf0e1e7e21f80c098"},
- {file = "torchvision-0.19.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:febe4f14d4afcb47cc861d8be7760ab6a123cd0817f97faf5771488cb6aa90f4"},
- {file = "torchvision-0.19.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e328309b8670a2e889b2fe76a1c2744a099c11c984da9a822357bd9debd699a5"},
- {file = "torchvision-0.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:6616f12e00a22e7f3fedbd0fccb0804c05e8fe22871668f10eae65cf3f283614"},
-]
-
-[package.dependencies]
-numpy = "*"
-pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0"
-torch = "2.4.1"
-
-[package.extras]
-gdown = ["gdown (>=4.7.3)"]
-scipy = ["scipy"]
-
-[[package]]
-name = "tornado"
-version = "6.4.1"
-description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
- {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
- {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
- {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
- {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
- {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
- {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
- {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
- {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
- {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
- {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
-]
-
-[[package]]
-name = "tqdm"
-version = "4.66.5"
-description = "Fast, Extensible Progress Meter"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"},
- {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"},
-]
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[package.extras]
-dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
-notebook = ["ipywidgets (>=6)"]
-slack = ["slack-sdk"]
-telegram = ["requests"]
-
-[[package]]
-name = "traitlets"
-version = "5.14.3"
-description = "Traitlets Python configuration system"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"},
- {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"},
-]
-
-[package.extras]
-docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
-test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"]
-
-[[package]]
-name = "transforms3d"
-version = "0.4.2"
-description = "Functions for 3D coordinate transformations"
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "transforms3d-0.4.2-py3-none-any.whl", hash = "sha256:1c70399d9e9473ecc23311fd947f727f7c69ed0b063244828c383aa1aefa5941"},
- {file = "transforms3d-0.4.2.tar.gz", hash = "sha256:e8b5df30eaedbee556e81c6938e55aab5365894e47d0a17615d7db7fd2393680"},
-]
-
-[package.dependencies]
-numpy = ">=1.15"
-
-[[package]]
-name = "trimesh"
-version = "4.4.7"
-description = "Import, export, process, analyze and view triangular meshes."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "trimesh-4.4.7-py3-none-any.whl", hash = "sha256:6df98f3f5b971945b416f567b7ff6ee0c51b70f01b80a16a990fdcceb8dbd114"},
- {file = "trimesh-4.4.7.tar.gz", hash = "sha256:e6619c70c99006d41f175bd5e1ba2c8c3dfdb00c2b41d65059917942e2f6971a"},
-]
-
-[package.dependencies]
-numpy = ">=1.20"
-
-[package.extras]
-all = ["trimesh[deprecated,easy,recommend,test]"]
-deprecated = ["gmsh (==4.12.2)"]
-easy = ["chardet", "colorlog", "embreex", "httpx", "jsonschema", "lxml", "manifold3d (>=2.3.0)", "mapbox-earcut (>=1.0.2)", "networkx", "pillow", "pycollada", "rtree", "scipy", "setuptools", "shapely", "svg.path", "vhacdx", "xatlas", "xxhash"]
-recommend = ["cascadio", "glooey", "meshio", "openctm", "psutil", "pyglet (<2)", "python-fcl", "scikit-image", "sympy"]
-test = ["coveralls", "ezdxf", "matplotlib", "pyinstrument", "pymeshlab", "pyright", "pytest", "pytest-beartype", "pytest-cov", "ruff"]
-
-[[package]]
-name = "triton"
-version = "3.0.0"
-description = "A language and compiler for custom Deep Learning operations"
-optional = false
-python-versions = "*"
-files = [
- {file = "triton-3.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e1efef76935b2febc365bfadf74bcb65a6f959a9872e5bddf44cc9e0adce1e1a"},
- {file = "triton-3.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ce8520437c602fb633f1324cc3871c47bee3b67acf9756c1a66309b60e3216c"},
- {file = "triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb"},
- {file = "triton-3.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bcbf3b1c48af6a28011a5c40a5b3b9b5330530c3827716b5fbf6d7adcc1e53e9"},
- {file = "triton-3.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6e5727202f7078c56f91ff13ad0c1abab14a0e7f2c87e91b12b6f64f3e8ae609"},
-]
-
-[package.dependencies]
-filelock = "*"
-
-[package.extras]
-build = ["cmake (>=3.20)", "lit"]
-tests = ["autopep8", "flake8", "isort", "llnl-hatchet", "numpy", "pytest", "scipy (>=1.7.1)"]
-tutorials = ["matplotlib", "pandas", "tabulate"]
-
-[[package]]
-name = "types-python-dateutil"
-version = "2.9.0.20241003"
-description = "Typing stubs for python-dateutil"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"},
- {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"},
-]
-
-[[package]]
-name = "typing-extensions"
-version = "4.12.2"
-description = "Backported and Experimental Type Hints for Python 3.8+"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
- {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
-]
-
-[[package]]
-name = "tzdata"
-version = "2024.2"
-description = "Provider of IANA time zone data"
-optional = false
-python-versions = ">=2"
-files = [
- {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"},
- {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"},
-]
-
-[[package]]
-name = "urchin"
-version = "0.0.27"
-description = "URDF parser and manipulator for Python"
-optional = true
-python-versions = "*"
-files = [
- {file = "urchin-0.0.27-py3-none-any.whl", hash = "sha256:e4cf43c8f52a44e0075e1778b76c203922085dd1fb9340cd703bf54188208611"},
- {file = "urchin-0.0.27.tar.gz", hash = "sha256:bda308ed7d2b80eb1e097dc3963fabe9e00a6cbd89a1f6be6f063c2a065d3671"},
-]
-
-[package.dependencies]
-lxml = "*"
-networkx = "*"
-numpy = "*"
-pillow = "*"
-pycollada = ">=0.6"
-pyribbit = ">=0.1.46"
-scipy = "*"
-six = "*"
-trimesh = "*"
-
-[package.extras]
-dev = ["flake8", "pre-commit", "pytest", "pytest-cov", "tox"]
-docs = ["sphinx", "sphinx-automodapi", "sphinx-rtd-theme"]
-
-[[package]]
-name = "urdf-parser-py"
-version = "0.0.4"
-description = "This package contains a python parser for the Unified Robot Description Format (URDF), which is an XML format for representing a robot model."
-optional = true
-python-versions = "*"
-files = [
- {file = "urdf_parser_py-0.0.4.tar.gz", hash = "sha256:e983f637145fded67bcff6a542302069bb975b2edf1b18318c093abba1b794cc"},
-]
-
-[package.dependencies]
-lxml = "*"
-pyyaml = "*"
-
-[[package]]
-name = "uri-template"
-version = "1.3.0"
-description = "RFC 6570 URI Template Processor"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"},
- {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"},
-]
-
-[package.extras]
-dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"]
-
-[[package]]
-name = "urllib3"
-version = "2.2.3"
-description = "HTTP library with thread-safe connection pooling, file post, and more."
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
- {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
-]
-
-[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
-h2 = ["h2 (>=4,<5)"]
-socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
-zstd = ["zstandard (>=0.18.0)"]
-
-[[package]]
-name = "virtualenv"
-version = "20.26.6"
-description = "Virtual Python Environment builder"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"},
- {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"},
-]
-
-[package.dependencies]
-distlib = ">=0.3.7,<1"
-filelock = ">=3.12.2,<4"
-platformdirs = ">=3.9.1,<5"
-
-[package.extras]
-docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
-test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"]
-
-[[package]]
-name = "wandb"
-version = "0.18.3"
-description = "A CLI and library for interacting with the Weights & Biases API."
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "wandb-0.18.3-py3-none-any.whl", hash = "sha256:7da64f7da0ff7572439de10bfd45534e8811e71e78ac2ccc3b818f1c0f3a9aef"},
- {file = "wandb-0.18.3-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:6674d8a5c40c79065b9c7eb765136756d5ebc9457a5f9abc820a660fb23f8b67"},
- {file = "wandb-0.18.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:741f566e409a2684d3047e4cc25e8e914d78196b901190937b24b6abb8b052e5"},
- {file = "wandb-0.18.3-py3-none-macosx_11_0_x86_64.whl", hash = "sha256:8be5e877570b693001c52dcc2089e48e6a4dcbf15f3adf5c9349f95148b59d58"},
- {file = "wandb-0.18.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d788852bd4739fa18de3918f309c3a955b5cef3247fae1c40df3a63af637e1a0"},
- {file = "wandb-0.18.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab81424eb207d78239a8d69c90521a70074fb81e3709055484e43c76fe44dc08"},
- {file = "wandb-0.18.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:2c91315b8b62423eae18577d66a4b4bb8e4341a7d5c849cb2963e3b3dff0bf6d"},
- {file = "wandb-0.18.3-py3-none-win32.whl", hash = "sha256:92a647dab783938ec87776a9fae8a13e72e6dad939c53e357cdea9d2570f0ad8"},
- {file = "wandb-0.18.3-py3-none-win_amd64.whl", hash = "sha256:29cac2cfa3124241fed22cfedc9a52e1500275ee9bbb0b428ce4bf63c4723bf0"},
- {file = "wandb-0.18.3.tar.gz", hash = "sha256:eb2574cea72bc908c6ce1b37edf7a889619e6e06e1b4714eecfe0662ded43c06"},
-]
-
-[package.dependencies]
-click = ">=7.1,<8.0.0 || >8.0.0"
-docker-pycreds = ">=0.4.0"
-gitpython = ">=1.0.0,<3.1.29 || >3.1.29"
-platformdirs = "*"
-protobuf = {version = ">=3.19.0,<4.21.0 || >4.21.0,<5.28.0 || >5.28.0,<6", markers = "python_version > \"3.9\" or sys_platform != \"linux\""}
-psutil = ">=5.0.0"
-pyyaml = "*"
-requests = ">=2.0.0,<3"
-sentry-sdk = ">=1.0.0"
-setproctitle = "*"
-setuptools = "*"
-
-[package.extras]
-aws = ["boto3"]
-azure = ["azure-identity", "azure-storage-blob"]
-gcp = ["google-cloud-storage"]
-importers = ["filelock", "mlflow", "polars (<=1.2.1)", "rich", "tenacity"]
-kubeflow = ["google-cloud-storage", "kubernetes", "minio", "sh"]
-launch = ["awscli", "azure-containerregistry", "azure-identity", "azure-storage-blob", "boto3", "botocore", "chardet", "google-auth", "google-cloud-aiplatform", "google-cloud-artifact-registry", "google-cloud-compute", "google-cloud-storage", "iso8601", "jsonschema", "kubernetes", "kubernetes-asyncio", "nbconvert", "nbformat", "optuna", "pydantic", "pyyaml (>=6.0.0)", "tomli", "typing-extensions"]
-media = ["bokeh", "imageio", "moviepy", "numpy", "pillow", "plotly (>=5.18.0)", "rdkit", "soundfile"]
-models = ["cloudpickle"]
-perf = ["orjson"]
-sweeps = ["sweeps (>=0.2.0)"]
-workspaces = ["wandb-workspaces"]
-
-[[package]]
-name = "wcwidth"
-version = "0.2.13"
-description = "Measures the displayed width of unicode strings in a terminal"
-optional = false
-python-versions = "*"
-files = [
- {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"},
- {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"},
-]
-
-[[package]]
-name = "webcolors"
-version = "24.8.0"
-description = "A library for working with the color formats defined by HTML and CSS."
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "webcolors-24.8.0-py3-none-any.whl", hash = "sha256:fc4c3b59358ada164552084a8ebee637c221e4059267d0f8325b3b560f6c7f0a"},
- {file = "webcolors-24.8.0.tar.gz", hash = "sha256:08b07af286a01bcd30d583a7acadf629583d1f79bfef27dd2c2c5c263817277d"},
-]
-
-[package.extras]
-docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"]
-tests = ["coverage[toml]"]
-
-[[package]]
-name = "webencodings"
-version = "0.5.1"
-description = "Character encoding aliases for legacy web content"
-optional = true
-python-versions = "*"
-files = [
- {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
- {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
-]
-
-[[package]]
-name = "websocket-client"
-version = "1.8.0"
-description = "WebSocket client for Python with low level API options"
-optional = true
-python-versions = ">=3.8"
-files = [
- {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"},
- {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"},
-]
-
-[package.extras]
-docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"]
-optional = ["python-socks", "wsaccel"]
-test = ["websockets"]
-
-[[package]]
-name = "werkzeug"
-version = "3.1.1"
-description = "The comprehensive WSGI web application library."
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "werkzeug-3.1.1-py3-none-any.whl", hash = "sha256:a71124d1ef06008baafa3d266c02f56e1836a5984afd6dd6c9230669d60d9fb5"},
- {file = "werkzeug-3.1.1.tar.gz", hash = "sha256:8cd39dfbdfc1e051965f156163e2974e52c210f130810e9ad36858f0fd3edad4"},
-]
-
-[package.dependencies]
-MarkupSafe = ">=2.1.1"
-
-[package.extras]
-watchdog = ["watchdog (>=2.3)"]
-
-[[package]]
-name = "widgetsnbextension"
-version = "4.0.13"
-description = "Jupyter interactive widgets for Jupyter Notebook"
-optional = true
-python-versions = ">=3.7"
-files = [
- {file = "widgetsnbextension-4.0.13-py3-none-any.whl", hash = "sha256:74b2692e8500525cc38c2b877236ba51d34541e6385eeed5aec15a70f88a6c71"},
- {file = "widgetsnbextension-4.0.13.tar.gz", hash = "sha256:ffcb67bc9febd10234a362795f643927f4e0c05d9342c727b65d2384f8feacb6"},
-]
-
-[[package]]
-name = "xmltodict"
-version = "0.14.1"
-description = "Makes working with XML feel like you are working with JSON"
-optional = true
-python-versions = ">=3.6"
-files = [
- {file = "xmltodict-0.14.1-py2.py3-none-any.whl", hash = "sha256:3ef4a7b71c08f19047fcbea572e1d7f4207ab269da1565b5d40e9823d3894e63"},
- {file = "xmltodict-0.14.1.tar.gz", hash = "sha256:338c8431e4fc554517651972d62f06958718f6262b04316917008e8fd677a6b0"},
-]
-
-[[package]]
-name = "xxhash"
-version = "3.5.0"
-description = "Python binding for xxHash"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "xxhash-3.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ece616532c499ee9afbb83078b1b952beffef121d989841f7f4b3dc5ac0fd212"},
- {file = "xxhash-3.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3171f693dbc2cef6477054a665dc255d996646b4023fe56cb4db80e26f4cc520"},
- {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c5d3e570ef46adaf93fc81b44aca6002b5a4d8ca11bd0580c07eac537f36680"},
- {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7cb29a034301e2982df8b1fe6328a84f4b676106a13e9135a0d7e0c3e9f806da"},
- {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d0d307d27099bb0cbeea7260eb39ed4fdb99c5542e21e94bb6fd29e49c57a23"},
- {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0342aafd421795d740e514bc9858ebddfc705a75a8c5046ac56d85fe97bf196"},
- {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3dbbd9892c5ebffeca1ed620cf0ade13eb55a0d8c84e0751a6653adc6ac40d0c"},
- {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4cc2d67fdb4d057730c75a64c5923abfa17775ae234a71b0200346bfb0a7f482"},
- {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ec28adb204b759306a3d64358a5e5c07d7b1dd0ccbce04aa76cb9377b7b70296"},
- {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1328f6d8cca2b86acb14104e381225a3d7b42c92c4b86ceae814e5c400dbb415"},
- {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8d47ebd9f5d9607fd039c1fbf4994e3b071ea23eff42f4ecef246ab2b7334198"},
- {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b96d559e0fcddd3343c510a0fe2b127fbff16bf346dd76280b82292567523442"},
- {file = "xxhash-3.5.0-cp310-cp310-win32.whl", hash = "sha256:61c722ed8d49ac9bc26c7071eeaa1f6ff24053d553146d5df031802deffd03da"},
- {file = "xxhash-3.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:9bed5144c6923cc902cd14bb8963f2d5e034def4486ab0bbe1f58f03f042f9a9"},
- {file = "xxhash-3.5.0-cp310-cp310-win_arm64.whl", hash = "sha256:893074d651cf25c1cc14e3bea4fceefd67f2921b1bb8e40fcfeba56820de80c6"},
- {file = "xxhash-3.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02c2e816896dc6f85922ced60097bcf6f008dedfc5073dcba32f9c8dd786f3c1"},
- {file = "xxhash-3.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6027dcd885e21581e46d3c7f682cfb2b870942feeed58a21c29583512c3f09f8"},
- {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1308fa542bbdbf2fa85e9e66b1077eea3a88bef38ee8a06270b4298a7a62a166"},
- {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c28b2fdcee797e1c1961cd3bcd3d545cab22ad202c846235197935e1df2f8ef7"},
- {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:924361811732ddad75ff23e90efd9ccfda4f664132feecb90895bade6a1b4623"},
- {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89997aa1c4b6a5b1e5b588979d1da048a3c6f15e55c11d117a56b75c84531f5a"},
- {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:685c4f4e8c59837de103344eb1c8a3851f670309eb5c361f746805c5471b8c88"},
- {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbd2ecfbfee70bc1a4acb7461fa6af7748ec2ab08ac0fa298f281c51518f982c"},
- {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:25b5a51dc3dfb20a10833c8eee25903fd2e14059e9afcd329c9da20609a307b2"},
- {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a8fb786fb754ef6ff8c120cb96629fb518f8eb5a61a16aac3a979a9dbd40a084"},
- {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a905ad00ad1e1c34fe4e9d7c1d949ab09c6fa90c919860c1534ff479f40fd12d"},
- {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:963be41bcd49f53af6d795f65c0da9b4cc518c0dd9c47145c98f61cb464f4839"},
- {file = "xxhash-3.5.0-cp311-cp311-win32.whl", hash = "sha256:109b436096d0a2dd039c355fa3414160ec4d843dfecc64a14077332a00aeb7da"},
- {file = "xxhash-3.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:b702f806693201ad6c0a05ddbbe4c8f359626d0b3305f766077d51388a6bac58"},
- {file = "xxhash-3.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:c4dcb4120d0cc3cc448624147dba64e9021b278c63e34a38789b688fd0da9bf3"},
- {file = "xxhash-3.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:14470ace8bd3b5d51318782cd94e6f94431974f16cb3b8dc15d52f3b69df8e00"},
- {file = "xxhash-3.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59aa1203de1cb96dbeab595ded0ad0c0056bb2245ae11fac11c0ceea861382b9"},
- {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08424f6648526076e28fae6ea2806c0a7d504b9ef05ae61d196d571e5c879c84"},
- {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a1ff00674879725b194695e17f23d3248998b843eb5e933007ca743310f793"},
- {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f2c61bee5844d41c3eb015ac652a0229e901074951ae48581d58bfb2ba01be"},
- {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d32a592cac88d18cc09a89172e1c32d7f2a6e516c3dfde1b9adb90ab5df54a6"},
- {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70dabf941dede727cca579e8c205e61121afc9b28516752fd65724be1355cc90"},
- {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5d0ddaca65ecca9c10dcf01730165fd858533d0be84c75c327487c37a906a27"},
- {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e5b5e16c5a480fe5f59f56c30abdeba09ffd75da8d13f6b9b6fd224d0b4d0a2"},
- {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149b7914451eb154b3dfaa721315117ea1dac2cc55a01bfbd4df7c68c5dd683d"},
- {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:eade977f5c96c677035ff39c56ac74d851b1cca7d607ab3d8f23c6b859379cab"},
- {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa9f547bd98f5553d03160967866a71056a60960be00356a15ecc44efb40ba8e"},
- {file = "xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8"},
- {file = "xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e"},
- {file = "xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2"},
- {file = "xxhash-3.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37889a0d13b0b7d739cfc128b1c902f04e32de17b33d74b637ad42f1c55101f6"},
- {file = "xxhash-3.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97a662338797c660178e682f3bc180277b9569a59abfb5925e8620fba00b9fc5"},
- {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f85e0108d51092bdda90672476c7d909c04ada6923c14ff9d913c4f7dc8a3bc"},
- {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2fd827b0ba763ac919440042302315c564fdb797294d86e8cdd4578e3bc7f3"},
- {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82085c2abec437abebf457c1d12fccb30cc8b3774a0814872511f0f0562c768c"},
- {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07fda5de378626e502b42b311b049848c2ef38784d0d67b6f30bb5008642f8eb"},
- {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c279f0d2b34ef15f922b77966640ade58b4ccdfef1c4d94b20f2a364617a493f"},
- {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89e66ceed67b213dec5a773e2f7a9e8c58f64daeb38c7859d8815d2c89f39ad7"},
- {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bcd51708a633410737111e998ceb3b45d3dbc98c0931f743d9bb0a209033a326"},
- {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ff2c0a34eae7df88c868be53a8dd56fbdf592109e21d4bfa092a27b0bf4a7bf"},
- {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e28503dccc7d32e0b9817aa0cbfc1f45f563b2c995b7a66c4c8a0d232e840c7"},
- {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a6c50017518329ed65a9e4829154626f008916d36295b6a3ba336e2458824c8c"},
- {file = "xxhash-3.5.0-cp313-cp313-win32.whl", hash = "sha256:53a068fe70301ec30d868ece566ac90d873e3bb059cf83c32e76012c889b8637"},
- {file = "xxhash-3.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:80babcc30e7a1a484eab952d76a4f4673ff601f54d5142c26826502740e70b43"},
- {file = "xxhash-3.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:4811336f1ce11cac89dcbd18f3a25c527c16311709a89313c3acaf771def2d4b"},
- {file = "xxhash-3.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6e5f70f6dca1d3b09bccb7daf4e087075ff776e3da9ac870f86ca316736bb4aa"},
- {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e76e83efc7b443052dd1e585a76201e40b3411fe3da7af4fe434ec51b2f163b"},
- {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33eac61d0796ca0591f94548dcfe37bb193671e0c9bcf065789b5792f2eda644"},
- {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ec70a89be933ea49222fafc3999987d7899fc676f688dd12252509434636622"},
- {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86b8e7f703ec6ff4f351cfdb9f428955859537125904aa8c963604f2e9d3e7"},
- {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0adfbd36003d9f86c8c97110039f7539b379f28656a04097e7434d3eaf9aa131"},
- {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:63107013578c8a730419adc05608756c3fa640bdc6abe806c3123a49fb829f43"},
- {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:683b94dbd1ca67557850b86423318a2e323511648f9f3f7b1840408a02b9a48c"},
- {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:5d2a01dcce81789cf4b12d478b5464632204f4c834dc2d064902ee27d2d1f0ee"},
- {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:a9d360a792cbcce2fe7b66b8d51274ec297c53cbc423401480e53b26161a290d"},
- {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:f0b48edbebea1b7421a9c687c304f7b44d0677c46498a046079d445454504737"},
- {file = "xxhash-3.5.0-cp37-cp37m-win32.whl", hash = "sha256:7ccb800c9418e438b44b060a32adeb8393764da7441eb52aa2aa195448935306"},
- {file = "xxhash-3.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c3bc7bf8cb8806f8d1c9bf149c18708cb1c406520097d6b0a73977460ea03602"},
- {file = "xxhash-3.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:74752ecaa544657d88b1d1c94ae68031e364a4d47005a90288f3bab3da3c970f"},
- {file = "xxhash-3.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dee1316133c9b463aa81aca676bc506d3f80d8f65aeb0bba2b78d0b30c51d7bd"},
- {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:602d339548d35a8579c6b013339fb34aee2df9b4e105f985443d2860e4d7ffaa"},
- {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:695735deeddfb35da1677dbc16a083445360e37ff46d8ac5c6fcd64917ff9ade"},
- {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1030a39ba01b0c519b1a82f80e8802630d16ab95dc3f2b2386a0b5c8ed5cbb10"},
- {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5bc08f33c4966f4eb6590d6ff3ceae76151ad744576b5fc6c4ba8edd459fdec"},
- {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160e0c19ee500482ddfb5d5570a0415f565d8ae2b3fd69c5dcfce8a58107b1c3"},
- {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f1abffa122452481a61c3551ab3c89d72238e279e517705b8b03847b1d93d738"},
- {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:d5e9db7ef3ecbfc0b4733579cea45713a76852b002cf605420b12ef3ef1ec148"},
- {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:23241ff6423378a731d84864bf923a41649dc67b144debd1077f02e6249a0d54"},
- {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:82b833d5563fefd6fceafb1aed2f3f3ebe19f84760fdd289f8b926731c2e6e91"},
- {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0a80ad0ffd78bef9509eee27b4a29e56f5414b87fb01a888353e3d5bda7038bd"},
- {file = "xxhash-3.5.0-cp38-cp38-win32.whl", hash = "sha256:50ac2184ffb1b999e11e27c7e3e70cc1139047e7ebc1aa95ed12f4269abe98d4"},
- {file = "xxhash-3.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:392f52ebbb932db566973693de48f15ce787cabd15cf6334e855ed22ea0be5b3"},
- {file = "xxhash-3.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bfc8cdd7f33d57f0468b0614ae634cc38ab9202c6957a60e31d285a71ebe0301"},
- {file = "xxhash-3.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0c48b6300cd0b0106bf49169c3e0536408dfbeb1ccb53180068a18b03c662ab"},
- {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe1a92cfbaa0a1253e339ccec42dbe6db262615e52df591b68726ab10338003f"},
- {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33513d6cc3ed3b559134fb307aae9bdd94d7e7c02907b37896a6c45ff9ce51bd"},
- {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eefc37f6138f522e771ac6db71a6d4838ec7933939676f3753eafd7d3f4c40bc"},
- {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a606c8070ada8aa2a88e181773fa1ef17ba65ce5dd168b9d08038e2a61b33754"},
- {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42eca420c8fa072cc1dd62597635d140e78e384a79bb4944f825fbef8bfeeef6"},
- {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:604253b2143e13218ff1ef0b59ce67f18b8bd1c4205d2ffda22b09b426386898"},
- {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6e93a5ad22f434d7876665444a97e713a8f60b5b1a3521e8df11b98309bff833"},
- {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:7a46e1d6d2817ba8024de44c4fd79913a90e5f7265434cef97026215b7d30df6"},
- {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:30eb2efe6503c379b7ab99c81ba4a779748e3830241f032ab46bd182bf5873af"},
- {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c8aa771ff2c13dd9cda8166d685d7333d389fae30a4d2bb39d63ab5775de8606"},
- {file = "xxhash-3.5.0-cp39-cp39-win32.whl", hash = "sha256:5ed9ebc46f24cf91034544b26b131241b699edbfc99ec5e7f8f3d02d6eb7fba4"},
- {file = "xxhash-3.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:220f3f896c6b8d0316f63f16c077d52c412619e475f9372333474ee15133a558"},
- {file = "xxhash-3.5.0-cp39-cp39-win_arm64.whl", hash = "sha256:a7b1d8315d9b5e9f89eb2933b73afae6ec9597a258d52190944437158b49d38e"},
- {file = "xxhash-3.5.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2014c5b3ff15e64feecb6b713af12093f75b7926049e26a580e94dcad3c73d8c"},
- {file = "xxhash-3.5.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fab81ef75003eda96239a23eda4e4543cedc22e34c373edcaf744e721a163986"},
- {file = "xxhash-3.5.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e2febf914ace002132aa09169cc572e0d8959d0f305f93d5828c4836f9bc5a6"},
- {file = "xxhash-3.5.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5d3a10609c51da2a1c0ea0293fc3968ca0a18bd73838455b5bca3069d7f8e32b"},
- {file = "xxhash-3.5.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5a74f23335b9689b66eb6dbe2a931a88fcd7a4c2cc4b1cb0edba8ce381c7a1da"},
- {file = "xxhash-3.5.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2b4154c00eb22e4d543f472cfca430e7962a0f1d0f3778334f2e08a7ba59363c"},
- {file = "xxhash-3.5.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d30bbc1644f726b825b3278764240f449d75f1a8bdda892e641d4a688b1494ae"},
- {file = "xxhash-3.5.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0b72f2423e2aa53077e54a61c28e181d23effeaafd73fcb9c494e60930c8e"},
- {file = "xxhash-3.5.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13de2b76c1835399b2e419a296d5b38dc4855385d9e96916299170085ef72f57"},
- {file = "xxhash-3.5.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0691bfcc4f9c656bcb96cc5db94b4d75980b9d5589f2e59de790091028580837"},
- {file = "xxhash-3.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:297595fe6138d4da2c8ce9e72a04d73e58725bb60f3a19048bc96ab2ff31c692"},
- {file = "xxhash-3.5.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc1276d369452040cbb943300dc8abeedab14245ea44056a2943183822513a18"},
- {file = "xxhash-3.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2061188a1ba352fc699c82bff722f4baacb4b4b8b2f0c745d2001e56d0dfb514"},
- {file = "xxhash-3.5.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38c384c434021e4f62b8d9ba0bc9467e14d394893077e2c66d826243025e1f81"},
- {file = "xxhash-3.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e6a4dd644d72ab316b580a1c120b375890e4c52ec392d4aef3c63361ec4d77d1"},
- {file = "xxhash-3.5.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:531af8845aaadcadf951b7e0c1345c6b9c68a990eeb74ff9acd8501a0ad6a1c9"},
- {file = "xxhash-3.5.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ce379bcaa9fcc00f19affa7773084dd09f5b59947b3fb47a1ceb0179f91aaa1"},
- {file = "xxhash-3.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd1b2281d01723f076df3c8188f43f2472248a6b63118b036e641243656b1b0f"},
- {file = "xxhash-3.5.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c770750cc80e8694492244bca7251385188bc5597b6a39d98a9f30e8da984e0"},
- {file = "xxhash-3.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b150b8467852e1bd844387459aa6fbe11d7f38b56e901f9f3b3e6aba0d660240"},
- {file = "xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f"},
-]
-
-[[package]]
-name = "yarl"
-version = "1.14.0"
-description = "Yet another URL library"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1bfc25aa6a7c99cf86564210f79a0b7d4484159c67e01232b116e445b3036547"},
- {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0cf21f46a15d445417de8fc89f2568852cf57fe8ca1ab3d19ddb24d45c0383ae"},
- {file = "yarl-1.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1dda53508df0de87b6e6b0a52d6718ff6c62a5aca8f5552748404963df639269"},
- {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:587c3cc59bc148a9b1c07a019346eda2549bc9f468acd2f9824d185749acf0a6"},
- {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3007a5b75cb50140708420fe688c393e71139324df599434633019314ceb8b59"},
- {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06ff23462398333c78b6f4f8d3d70410d657a471c2c5bbe6086133be43fc8f1a"},
- {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689a99a42ee4583fcb0d3a67a0204664aa1539684aed72bdafcbd505197a91c4"},
- {file = "yarl-1.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0547ab1e9345dc468cac8368d88ea4c5bd473ebc1d8d755347d7401982b5dd8"},
- {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:742aef0a99844faaac200564ea6f5e08facb285d37ea18bd1a5acf2771f3255a"},
- {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:176110bff341b6730f64a1eb3a7070e12b373cf1c910a9337e7c3240497db76f"},
- {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46a9772a1efa93f9cd170ad33101c1817c77e0e9914d4fe33e2da299d7cf0f9b"},
- {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ee2c68e4f2dd1b1c15b849ba1c96fac105fca6ffdb7c1e8be51da6fabbdeafb9"},
- {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:047b258e00b99091b6f90355521f026238c63bd76dcf996d93527bb13320eefd"},
- {file = "yarl-1.14.0-cp310-cp310-win32.whl", hash = "sha256:0aa92e3e30a04f9462a25077db689c4ac5ea9ab6cc68a2e563881b987d42f16d"},
- {file = "yarl-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:d9baec588f015d0ee564057aa7574313c53a530662ffad930b7886becc85abdf"},
- {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:07f9eaf57719d6721ab15805d85f4b01a5b509a0868d7320134371bcb652152d"},
- {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c14b504a74e58e2deb0378b3eca10f3d076635c100f45b113c18c770b4a47a50"},
- {file = "yarl-1.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:16a682a127930f3fc4e42583becca6049e1d7214bcad23520c590edd741d2114"},
- {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73bedd2be05f48af19f0f2e9e1353921ce0c83f4a1c9e8556ecdcf1f1eae4892"},
- {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3ab950f8814f3b7b5e3eebc117986f817ec933676f68f0a6c5b2137dd7c9c69"},
- {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b693c63e7e64b524f54aa4888403c680342d1ad0d97be1707c531584d6aeeb4f"},
- {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85cb3e40eaa98489f1e2e8b29f5ad02ee1ee40d6ce6b88d50cf0f205de1d9d2c"},
- {file = "yarl-1.14.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f24f08b6c9b9818fd80612c97857d28f9779f0d1211653ece9844fc7b414df2"},
- {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29a84a46ec3ebae7a1c024c055612b11e9363a8a23238b3e905552d77a2bc51b"},
- {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5cd5dad8366e0168e0fd23d10705a603790484a6dbb9eb272b33673b8f2cce72"},
- {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a152751af7ef7b5d5fa6d215756e508dd05eb07d0cf2ba51f3e740076aa74373"},
- {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3d569f877ed9a708e4c71a2d13d2940cb0791da309f70bd970ac1a5c088a0a92"},
- {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6a615cad11ec3428020fb3c5a88d85ce1b5c69fd66e9fcb91a7daa5e855325dd"},
- {file = "yarl-1.14.0-cp311-cp311-win32.whl", hash = "sha256:bab03192091681d54e8225c53f270b0517637915d9297028409a2a5114ff4634"},
- {file = "yarl-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:985623575e5c4ea763056ffe0e2d63836f771a8c294b3de06d09480538316b13"},
- {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fc2c80bc87fba076e6cbb926216c27fba274dae7100a7b9a0983b53132dd99f2"},
- {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:55c144d363ad4626ca744556c049c94e2b95096041ac87098bb363dcc8635e8d"},
- {file = "yarl-1.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b03384eed107dbeb5f625a99dc3a7de8be04fc8480c9ad42fccbc73434170b20"},
- {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f72a0d746d38cb299b79ce3d4d60ba0892c84bbc905d0d49c13df5bace1b65f8"},
- {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8648180b34faaea4aa5b5ca7e871d9eb1277033fa439693855cf0ea9195f85f1"},
- {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9557c9322aaa33174d285b0c1961fb32499d65ad1866155b7845edc876c3c835"},
- {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f50eb3837012a937a2b649ec872b66ba9541ad9d6f103ddcafb8231cfcafd22"},
- {file = "yarl-1.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8892fa575ac9b1b25fae7b221bc4792a273877b9b56a99ee2d8d03eeb3dbb1d2"},
- {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6a2c5c5bb2556dfbfffffc2bcfb9c235fd2b566d5006dfb2a37afc7e3278a07"},
- {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ab3abc0b78a5dfaa4795a6afbe7b282b6aa88d81cf8c1bb5e394993d7cae3457"},
- {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:47eede5d11d669ab3759b63afb70d28d5328c14744b8edba3323e27dc52d298d"},
- {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fe4d2536c827f508348d7b40c08767e8c7071614250927233bf0c92170451c0a"},
- {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0fd7b941dd1b00b5f0acb97455fea2c4b7aac2dd31ea43fb9d155e9bc7b78664"},
- {file = "yarl-1.14.0-cp312-cp312-win32.whl", hash = "sha256:99ff3744f5fe48288be6bc402533b38e89749623a43208e1d57091fc96b783b9"},
- {file = "yarl-1.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ca3894e9e9f72da93544f64988d9c052254a338a9f855165f37f51edb6591de"},
- {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d02d700705d67e09e1f57681f758f0b9d4412eeb70b2eb8d96ca6200b486db3"},
- {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:30600ba5db60f7c0820ef38a2568bb7379e1418ecc947a0f76fd8b2ff4257a97"},
- {file = "yarl-1.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e85d86527baebb41a214cc3b45c17177177d900a2ad5783dbe6f291642d4906f"},
- {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37001e5d4621cef710c8dc1429ca04e189e572f128ab12312eab4e04cf007132"},
- {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4f4547944d4f5cfcdc03f3f097d6f05bbbc915eaaf80a2ee120d0e756de377d"},
- {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75ff4c819757f9bdb35de049a509814d6ce851fe26f06eb95a392a5640052482"},
- {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68ac1a09392ed6e3fd14be880d39b951d7b981fd135416db7d18a6208c536561"},
- {file = "yarl-1.14.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96952f642ac69075e44c7d0284528938fdff39422a1d90d3e45ce40b72e5e2d9"},
- {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a56fbe3d7f3bce1d060ea18d2413a2ca9ca814eea7cedc4d247b5f338d54844e"},
- {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7e2637d75e92763d1322cb5041573279ec43a80c0f7fbbd2d64f5aee98447b17"},
- {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9abe80ae2c9d37c17599557b712e6515f4100a80efb2cda15f5f070306477cd2"},
- {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:217a782020b875538eebf3948fac3a7f9bbbd0fd9bf8538f7c2ad7489e80f4e8"},
- {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9cfef3f14f75bf6aba73a76caf61f9d00865912a04a4393c468a7ce0981b519"},
- {file = "yarl-1.14.0-cp313-cp313-win32.whl", hash = "sha256:d8361c7d04e6a264481f0b802e395f647cd3f8bbe27acfa7c12049efea675bd1"},
- {file = "yarl-1.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:bc24f968b82455f336b79bf37dbb243b7d76cd40897489888d663d4e028f5069"},
- {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:91d875f75fabf76b3018c5f196bf3d308ed2b49ddcb46c1576d6b075754a1393"},
- {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4009def9be3a7e5175db20aa2d7307ecd00bbf50f7f0f989300710eee1d0b0b9"},
- {file = "yarl-1.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:582cedde49603f139be572252a318b30dc41039bc0b8165f070f279e5d12187f"},
- {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbd9ff43a04f8ffe8a959a944c2dca10d22f5f99fc6a459f49c3ebfb409309d9"},
- {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f805e37ed16cc212fdc538a608422d7517e7faf539bedea4fe69425bc55d76"},
- {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95e16e9eaa2d7f5d87421b8fe694dd71606aa61d74b824c8d17fc85cc51983d1"},
- {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:816d24f584edefcc5ca63428f0b38fee00b39fe64e3c5e558f895a18983efe96"},
- {file = "yarl-1.14.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd2660c01367eb3ef081b8fa0a5da7fe767f9427aa82023a961a5f28f0d4af6c"},
- {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:94b2bb9bcfd5be9d27004ea4398fb640373dd0c1a9e219084f42c08f77a720ab"},
- {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c2089a9afef887664115f7fa6d3c0edd6454adaca5488dba836ca91f60401075"},
- {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2192f718db4a8509f63dd6d950f143279211fa7e6a2c612edc17d85bf043d36e"},
- {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:8385ab36bf812e9d37cf7613999a87715f27ef67a53f0687d28c44b819df7cb0"},
- {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b4c1ecba93e7826dc71ddba75fb7740cdb52e7bd0be9f03136b83f54e6a1f511"},
- {file = "yarl-1.14.0-cp38-cp38-win32.whl", hash = "sha256:e749af6c912a7bb441d105c50c1a3da720474e8acb91c89350080dd600228f0e"},
- {file = "yarl-1.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:147e36331f6f63e08a14640acf12369e041e0751bb70d9362df68c2d9dcf0c87"},
- {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a9f917966d27f7ce30039fe8d900f913c5304134096554fd9bea0774bcda6d1"},
- {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a2f8fb7f944bcdfecd4e8d855f84c703804a594da5123dd206f75036e536d4d"},
- {file = "yarl-1.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f4e475f29a9122f908d0f1f706e1f2fc3656536ffd21014ff8a6f2e1b14d1d8"},
- {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8089d4634d8fa2b1806ce44fefa4979b1ab2c12c0bc7ef3dfa45c8a374811348"},
- {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b16f6c75cffc2dc0616ea295abb0e1967601bd1fb1e0af6a1de1c6c887f3439"},
- {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498b3c55087b9d762636bca9b45f60d37e51d24341786dc01b81253f9552a607"},
- {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3f8bfc1db82589ef965ed234b87de30d140db8b6dc50ada9e33951ccd8ec07a"},
- {file = "yarl-1.14.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:625f207b1799e95e7c823f42f473c1e9dbfb6192bd56bba8695656d92be4535f"},
- {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:781e2495e408a81e4eaeedeb41ba32b63b1980dddf8b60dbbeff6036bcd35049"},
- {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:659603d26d40dd4463200df9bfbc339fbfaed3fe32e5c432fe1dc2b5d4aa94b4"},
- {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4e0d45ebf975634468682c8bec021618b3ad52c37619e5c938f8f831fa1ac5c0"},
- {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a2e4725a08cb2b4794db09e350c86dee18202bb8286527210e13a1514dc9a59a"},
- {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:19268b4fec1d7760134f2de46ef2608c2920134fb1fa61e451f679e41356dc55"},
- {file = "yarl-1.14.0-cp39-cp39-win32.whl", hash = "sha256:337912bcdcf193ade64b9aae5a4017a0a1950caf8ca140362e361543c6773f21"},
- {file = "yarl-1.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:b6d0147574ce2e7b812c989e50fa72bbc5338045411a836bd066ce5fc8ac0bce"},
- {file = "yarl-1.14.0-py3-none-any.whl", hash = "sha256:c8ed4034f0765f8861620c1f2f2364d2e58520ea288497084dae880424fc0d9f"},
- {file = "yarl-1.14.0.tar.gz", hash = "sha256:88c7d9d58aab0724b979ab5617330acb1c7030b79379c8138c1c8c94e121d1b3"},
-]
-
-[package.dependencies]
-idna = ">=2.0"
-multidict = ">=4.0"
-propcache = ">=0.2.0"
-
-[[package]]
-name = "zarr"
-version = "2.18.3"
-description = "An implementation of chunked, compressed, N-dimensional arrays for Python"
-optional = false
-python-versions = ">=3.10"
-files = [
- {file = "zarr-2.18.3-py3-none-any.whl", hash = "sha256:b1f7dfd2496f436745cdd4c7bcf8d3b4bc1dceef5fdd0d589c87130d842496dd"},
- {file = "zarr-2.18.3.tar.gz", hash = "sha256:2580d8cb6dd84621771a10d31c4d777dca8a27706a1a89b29f42d2d37e2df5ce"},
-]
-
-[package.dependencies]
-asciitree = "*"
-fasteners = {version = "*", markers = "sys_platform != \"emscripten\""}
-numcodecs = ">=0.10.0"
-numpy = ">=1.24"
-
-[package.extras]
-docs = ["numcodecs[msgpack]", "numpydoc", "pydata-sphinx-theme", "sphinx", "sphinx-automodapi", "sphinx-copybutton", "sphinx-design", "sphinx-issues"]
-jupyter = ["ipytree (>=0.2.2)", "ipywidgets (>=8.0.0)", "notebook"]
-
-[[package]]
-name = "zipp"
-version = "3.20.2"
-description = "Backport of pathlib-compatible object wrapper for zip files"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
- {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"},
-]
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
-type = ["pytest-mypy"]
-
-[extras]
-aloha = ["gym-aloha"]
-dev = ["debugpy", "pre-commit"]
-dora = ["gym-dora"]
-dynamixel = ["dynamixel-sdk", "pynput"]
-feetech = ["feetech-servo-sdk", "pynput"]
-intelrealsense = ["pyrealsense2"]
-pusht = ["gym-pusht"]
-stretch = ["hello-robot-stretch-body", "pynput", "pyrealsense2", "pyrender"]
-test = ["pyserial", "pytest", "pytest-cov"]
-umi = ["imagecodecs"]
-video-benchmark = ["pandas", "scikit-image"]
-xarm = ["gym-xarm"]
-
-[metadata]
-lock-version = "2.0"
-python-versions = ">=3.10,<3.13"
-content-hash = "ee60d9251f6a6253d0c371707a72a500a6053d7925c6898e6663d9320ad11503"
diff --git a/pyproject.toml b/pyproject.toml
index 3351dea2a2..e0d754f530 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,125 +1,254 @@
-[tool.poetry]
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[build-system]
+requires = ["setuptools"]
+build-backend = "setuptools.build_meta"
+
+[project.urls]
+homepage = "https://huggingface.co/lerobot"
+documentation = "https://huggingface.co/docs/lerobot/index"
+source = "https://github.com/huggingface/lerobot"
+issues = "https://github.com/huggingface/lerobot/issues"
+discord = "https://discord.gg/s3KuuzsPFb"
+
+[project]
name = "lerobot"
version = "0.1.0"
description = "🤗 LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch"
+readme = "README.md"
+license = { text = "Apache-2.0" }
+requires-python = ">=3.10"
authors = [
- "Rémi Cadène ",
- "Simon Alibert ",
- "Alexander Soare ",
- "Quentin Gallouédec ",
- "Adil Zouitine ",
- "Thomas Wolf ",
+ { name = "Rémi Cadène", email = "re.cadene@gmail.com" },
+ { name = "Simon Alibert", email = "alibert.sim@gmail.com" },
+ { name = "Alexander Soare", email = "alexander.soare159@gmail.com" },
+ { name = "Quentin Gallouédec", email = "quentin.gallouedec@ec-lyon.fr" },
+ { name = "Steven Palma", email = "imstevenpmwork@ieee.org" },
+ { name = "Pepijn Kooijmans", email = "pepijnkooijmans@outlook.com"},
+ { name = "Michel Aractingi", email = "michel.aractingi@gmail.com"},
+ { name = "Adil Zouitine", email = "adilzouitinegm@gmail.com" },
+ { name = "Dana Aubakirova", email = "danaaubakirova17@gmail.com"},
+ { name = "Caroline Pascal", email = "caroline8.pascal@gmail.com"},
+ { name = "Martino Russi", email = "nopyeps@gmail.com"},
+ { name = "Thomas Wolf", email = "thomaswolfcontact@gmail.com" },
]
-repository = "https://github.com/huggingface/lerobot"
-readme = "README.md"
-license = "Apache-2.0"
-classifiers=[
+classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
- "Topic :: Software Development :: Build Tools",
- "Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.10",
+ "Topic :: Software Development :: Build Tools",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+]
+keywords = ["lerobot", "huggingface", "robotics", "machine learning", "artificial intelligence"]
+
+dependencies = [
+
+ # Hugging Face dependencies
+ "datasets>=2.19.0,<=3.6.0", # TODO: Bumb dependency
+ "diffusers>=0.27.2",
+ "huggingface-hub[hf-transfer,cli]>=0.27.1",
+
+ # Core dependencies
+ "cmake>=3.29.0.1",
+ "einops>=0.8.0",
+ "opencv-python-headless>=4.9.0",
+ "av>=14.2.0",
+ "torch>=2.2.1",
+ "torchcodec>=0.2.1; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')",
+ "torchvision>=0.21.0",
+ "jsonlines>=4.0.0",
+ "packaging>=24.2",
+ "pynput>=1.7.7",
+ "pyserial>=3.5",
+ "wandb>=0.16.3",
+
+ "draccus==0.10.0", # TODO: Remove ==
+ "gymnasium>=0.29.1,<1.0.0", # TODO: Bumb dependency
+ "rerun-sdk>=0.21.0,<0.23.0", # TODO: Bumb dependency
+
+ # Support dependencies
+ "deepdiff>=7.0.1,<9.0.0",
+ "flask>=3.0.3,<4.0.0",
+ "imageio[ffmpeg]>=2.34.0,<3.0.0",
+ "termcolor>=2.4.0,<4.0.0",
+]
+
+# Optional dependencies
+[project.optional-dependencies]
+
+# Common
+pygame-dep = ["pygame>=2.5.1"]
+placo-dep = ["placo>=0.9.6"]
+transformers-dep = ["transformers>=4.50.3,<4.52.0"] # TODO: Bump dependency
+grpcio-dep = ["grpcio==1.71.0"]
+
+# Motors
+feetech = ["feetech-servo-sdk>=1.0.0"]
+dynamixel = ["dynamixel-sdk>=3.7.31"]
+
+# Robots
+gamepad = ["lerobot[pygame-dep]", "hidapi>=0.14.0"]
+hopejr = ["lerobot[feetech]", "lerobot[pygame-dep]"]
+lekiwi = ["lerobot[feetech]", "pyzmq>=26.2.1"]
+kinematics = ["lerobot[placo-dep]"]
+intelrealsense = [
+ "pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'",
+ "pyrealsense2-macosx>=2.54 ; sys_platform == 'darwin'",
]
-packages = [{include = "lerobot"}]
-
-
-[tool.poetry.dependencies]
-python = ">=3.10,<3.13"
-termcolor = ">=2.4.0"
-omegaconf = ">=2.3.0"
-wandb = ">=0.16.3"
-imageio = {extras = ["ffmpeg"], version = ">=2.34.0"}
-gdown = ">=5.1.0"
-hydra-core = ">=1.3.2"
-einops = ">=0.8.0"
-pymunk = ">=6.6.0"
-zarr = ">=2.17.0"
-numba = ">=0.59.0"
-torch = ">=2.2.1"
-opencv-python = ">=4.9.0"
-diffusers = ">=0.27.2"
-torchvision = ">=0.17.1"
-h5py = ">=3.10.0"
-huggingface-hub = {extras = ["hf-transfer", "cli"], version = ">=0.25.2"}
-gymnasium = "==0.29.1" # TODO(rcadene, aliberts): Make gym 1.0.0 work
-cmake = ">=3.29.0.1"
-gym-dora = { git = "https://github.com/dora-rs/dora-lerobot.git", subdirectory = "gym_dora", optional = true }
-gym-pusht = { version = ">=0.1.5", optional = true}
-gym-xarm = { version = ">=0.1.1", optional = true}
-gym-aloha = { version = ">=0.1.1", optional = true}
-pre-commit = {version = ">=3.7.0", optional = true}
-debugpy = {version = ">=1.8.1", optional = true}
-pytest = {version = ">=8.1.0", optional = true}
-pytest-cov = {version = ">=5.0.0", optional = true}
-datasets = ">=2.19.0"
-imagecodecs = { version = ">=2024.1.1", optional = true }
-pyav = ">=12.0.5"
-rerun-sdk = ">=0.21.0"
-deepdiff = ">=7.0.1"
-flask = ">=3.0.3"
-pandas = {version = ">=2.2.2", optional = true}
-scikit-image = {version = ">=0.23.2", optional = true}
-dynamixel-sdk = {version = ">=3.7.31", optional = true}
-pynput = {version = ">=1.7.7", optional = true}
-feetech-servo-sdk = {version = ">=1.0.0", optional = true}
-setuptools = {version = "!=71.0.1", optional = true} # TODO(rcadene, aliberts): 71.0.1 has a bug
-pyrealsense2 = {version = ">=2.55.1.6486", markers = "sys_platform != 'darwin'", optional = true} # TODO(rcadene, aliberts): Fix on Mac
-pyrender = {git = "https://github.com/mmatl/pyrender.git", markers = "sys_platform == 'linux'", optional = true}
-hello-robot-stretch-body = {version = ">=0.7.27", markers = "sys_platform == 'linux'", optional = true}
-pyserial = {version = ">=3.5", optional = true}
-jsonlines = ">=4.0.0"
-
-
-[tool.poetry.extras]
-dora = ["gym-dora"]
-pusht = ["gym-pusht"]
-xarm = ["gym-xarm"]
-aloha = ["gym-aloha"]
-dev = ["pre-commit", "debugpy"]
-test = ["pytest", "pytest-cov", "pyserial"]
-umi = ["imagecodecs"]
-video_benchmark = ["scikit-image", "pandas"]
-dynamixel = ["dynamixel-sdk", "pynput"]
-feetech = ["feetech-servo-sdk", "pynput"]
-intelrealsense = ["pyrealsense2"]
-stretch = ["hello-robot-stretch-body", "pyrender", "pyrealsense2", "pynput"]
+# stretch = [
+# "hello-robot-stretch-body>=0.7.27 ; sys_platform == 'linux'",
+# "pyrender @ git+https://github.com/mmatl/pyrender.git ; sys_platform == 'linux'",
+# "pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'"
+# ] # TODO: Currently not supported
+
+# Policies
+pi0 = ["lerobot[transformers-dep]"]
+smolvla = ["lerobot[transformers-dep]", "num2words>=0.5.14", "accelerate>=1.7.0", "safetensors>=0.4.3"]
+hilserl = ["lerobot[transformers-dep]", "gym-hil>=0.1.9", "protobuf>=5.29.3", "lerobot[grpcio-dep]", "lerobot[placo-dep]"]
+
+# Features
+async = ["lerobot[grpcio-dep]", "matplotlib>=3.10.3"]
+
+# Development
+docs = ["hf-doc-builder @ git+https://github.com/huggingface/doc-builder.git@main", "watchdog >= 6.0.0"]
+dev = ["pre-commit>=3.7.0", "debugpy>=1.8.1", "grpcio-tools==1.71.0"]
+test = ["pytest>=8.1.0", "pytest-timeout>=2.4.0", "pytest-cov>=5.0.0", "mock-serial>=0.0.1 ; sys_platform != 'win32'"]
+video_benchmark = ["scikit-image>=0.23.2", "pandas>=2.2.2"]
+
+# Simulation
+aloha = ["gym-aloha>=0.1.1"]
+pusht = ["gym-pusht>=0.1.5", "pymunk>=6.6.0,<7.0.0"] # TODO: Fix pymunk version in gym-pusht instead
+xarm = ["gym-xarm>=0.1.1"]
+
+# All
+all = [
+ "lerobot[dynamixel]",
+ "lerobot[gamepad]",
+ "lerobot[hopejr]",
+ "lerobot[lekiwi]",
+ "lerobot[kinematics]",
+ "lerobot[intelrealsense]",
+ "lerobot[pi0]",
+ "lerobot[smolvla]",
+ "lerobot[hilserl]",
+ "lerobot[async]",
+ "lerobot[docs]",
+ "lerobot[dev]",
+ "lerobot[test]",
+ "lerobot[video_benchmark]",
+ "lerobot[aloha]",
+ "lerobot[pusht]",
+ "lerobot[xarm]"
+]
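+
+# Optional groups are installed with standard pip extras syntax, e.g. (hypothetical local editable install):
+# pip install -e ".[feetech,dynamixel,test]"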
+
+# ---------------- Tool Configurations ----------------
+[tool.setuptools.packages.find]
+where = ["src"]
[tool.ruff]
-line-length = 110
target-version = "py310"
-exclude = [
- "tests/data",
- ".bzr",
- ".direnv",
- ".eggs",
- ".git",
- ".git-rewrite",
- ".hg",
- ".mypy_cache",
- ".nox",
- ".pants.d",
- ".pytype",
- ".ruff_cache",
- ".svn",
- ".tox",
- ".venv",
- "__pypackages__",
- "_build",
- "buck-out",
- "build",
- "dist",
- "node_modules",
- "venv",
+line-length = 110
+exclude = ["tests/artifacts/**/*.safetensors", "*_pb2.py", "*_pb2_grpc.py"]
+
+[tool.ruff.lint]
+# E, W: pycodestyle errors and warnings
+# F: PyFlakes
+# I: isort
+# UP: pyupgrade
+# B: flake8-bugbear (good practices, potential bugs)
+# C4: flake8-comprehensions (more concise comprehensions)
+# A: flake8-builtins (shadowing builtins)
+# SIM: flake8-simplify
+# RUF: Ruff-specific rules
+# D: pydocstyle (for docstring style/formatting)
+# S: flake8-bandit (some security checks, complements Bandit)
+# T20: flake8-print (discourage print statements in production code)
+# N: pep8-naming
+# TODO: Uncomment rules when ready to use
+select = [
+ "E", "W", "F", "I", "B", "C4", "T20", "N" # "SIM", "A", "S", "D", "RUF", "UP"
+]
+ignore = [
+ "E501", # Line too long
+ "T201", # Print statement found
+ "T203", # Pprint statement found
+ "B008", # Perform function call in argument defaults
]
+[tool.ruff.lint.per-file-ignores]
+"__init__.py" = ["F401", "F403"]
-[tool.ruff.lint]
-select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"]
+[tool.ruff.lint.isort]
+combine-as-imports = true
+known-first-party = ["lerobot"]
+[tool.ruff.lint.pydocstyle]
+convention = "google"
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
+[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
+skip-magic-trailing-comma = false
+line-ending = "auto"
+docstring-code-format = true
+
+[tool.bandit]
+exclude_dirs = [
+ "tests",
+ "benchmarks",
+ "src/lerobot/datasets/push_dataset_to_hub",
+ "src/lerobot/datasets/v2/convert_dataset_v1_to_v2",
+ "src/lerobot/policies/pi0/conversion_scripts",
+ "src/lerobot/scripts/push_dataset_to_hub.py",
+]
+skips = ["B101", "B311", "B404", "B603", "B615"]
+
+[tool.typos]
+default.extend-ignore-re = [
+ "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # spellchecker:disable-line
+ "(?s)(#|//)\\s*spellchecker:off.*?\\n\\s*(#|//)\\s*spellchecker:on", # spellchecker:
+]
+default.extend-ignore-identifiers-re = [
+ # Add individual words here to ignore them
+ "2nd",
+ "pn",
+ "ser",
+ "ein",
+]
+
+# TODO: Uncomment when ready to use
+# [tool.interrogate]
+# ignore-init-module = true
+# ignore-init-method = true
+# ignore-nested-functions = false
+# ignore-magic = false
+# ignore-semiprivate = false
+# ignore-private = false
+# ignore-property-decorators = false
+# ignore-module = false
+# ignore-setters = false
+# fail-under = 80
+# output-format = "term-missing"
+# color = true
+# paths = ["src/lerobot"]
+
+# [tool.mypy]
+# python_version = "3.10"
+# warn_return_any = true
+# warn_unused_configs = true
+# ignore_missing_imports = false
diff --git a/src/lerobot/__init__.py b/src/lerobot/__init__.py
new file mode 100644
index 0000000000..38d4e86442
--- /dev/null
+++ b/src/lerobot/__init__.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This file contains lists of available environments, datasets and policies to reflect the current state of the LeRobot library.
+We do not want to import all the dependencies, but instead keep this file lightweight to ensure fast access to these variables.
+
+Example:
+ ```python
+ import lerobot
+ print(lerobot.available_envs)
+ print(lerobot.available_tasks_per_env)
+ print(lerobot.available_datasets)
+ print(lerobot.available_datasets_per_env)
+ print(lerobot.available_real_world_datasets)
+ print(lerobot.available_policies)
+ print(lerobot.available_policies_per_env)
+ print(lerobot.available_robots)
+ print(lerobot.available_cameras)
+ print(lerobot.available_motors)
+ ```
+
+When implementing a new dataset loadable with LeRobotDataset, follow these steps:
+- Update `available_datasets_per_env` in `lerobot/__init__.py`
+
+When implementing a new environment (e.g. `gym_aloha`), follow these steps:
+- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
+
+When implementing a new policy class (e.g. `DiffusionPolicy`), follow these steps:
+- Update `available_policies` and `available_policies_per_env` in `lerobot/__init__.py`
+- Set the required `name` class attribute.
+- Update variables in `tests/test_available.py` by importing your new Policy class
+"""
+
+import itertools
+
+from lerobot.__version__ import __version__ # noqa: F401
+
+# TODO(rcadene): Improve policies and envs. As of now, an item in `available_policies`
+# refers to a yaml file AND a modeling name. Same for `available_envs`, which refers to
+# a yaml file AND an environment name. The difference should be more obvious.
+available_tasks_per_env = {
+ "aloha": [
+ "AlohaInsertion-v0",
+ "AlohaTransferCube-v0",
+ ],
+ "pusht": ["PushT-v0"],
+ "xarm": ["XarmLift-v0"],
+}
+available_envs = list(available_tasks_per_env.keys())
+
+available_datasets_per_env = {
+ "aloha": [
+ "lerobot/aloha_sim_insertion_human",
+ "lerobot/aloha_sim_insertion_scripted",
+ "lerobot/aloha_sim_transfer_cube_human",
+ "lerobot/aloha_sim_transfer_cube_scripted",
+ "lerobot/aloha_sim_insertion_human_image",
+ "lerobot/aloha_sim_insertion_scripted_image",
+ "lerobot/aloha_sim_transfer_cube_human_image",
+ "lerobot/aloha_sim_transfer_cube_scripted_image",
+ ],
+ # TODO(alexander-soare): Add "lerobot/pusht_keypoints". Right now we can't because this is too tightly
+ # coupled with tests.
+ "pusht": ["lerobot/pusht", "lerobot/pusht_image"],
+ "xarm": [
+ "lerobot/xarm_lift_medium",
+ "lerobot/xarm_lift_medium_replay",
+ "lerobot/xarm_push_medium",
+ "lerobot/xarm_push_medium_replay",
+ "lerobot/xarm_lift_medium_image",
+ "lerobot/xarm_lift_medium_replay_image",
+ "lerobot/xarm_push_medium_image",
+ "lerobot/xarm_push_medium_replay_image",
+ ],
+}
+
+available_real_world_datasets = [
+ "lerobot/aloha_mobile_cabinet",
+ "lerobot/aloha_mobile_chair",
+ "lerobot/aloha_mobile_elevator",
+ "lerobot/aloha_mobile_shrimp",
+ "lerobot/aloha_mobile_wash_pan",
+ "lerobot/aloha_mobile_wipe_wine",
+ "lerobot/aloha_static_battery",
+ "lerobot/aloha_static_candy",
+ "lerobot/aloha_static_coffee",
+ "lerobot/aloha_static_coffee_new",
+ "lerobot/aloha_static_cups_open",
+ "lerobot/aloha_static_fork_pick_up",
+ "lerobot/aloha_static_pingpong_test",
+ "lerobot/aloha_static_pro_pencil",
+ "lerobot/aloha_static_screw_driver",
+ "lerobot/aloha_static_tape",
+ "lerobot/aloha_static_thread_velcro",
+ "lerobot/aloha_static_towel",
+ "lerobot/aloha_static_vinh_cup",
+ "lerobot/aloha_static_vinh_cup_left",
+ "lerobot/aloha_static_ziploc_slide",
+ "lerobot/umi_cup_in_the_wild",
+ "lerobot/unitreeh1_fold_clothes",
+ "lerobot/unitreeh1_rearrange_objects",
+ "lerobot/unitreeh1_two_robot_greeting",
+ "lerobot/unitreeh1_warehouse",
+ "lerobot/nyu_rot_dataset",
+ "lerobot/utokyo_saytap",
+ "lerobot/imperialcollege_sawyer_wrist_cam",
+ "lerobot/utokyo_xarm_bimanual",
+ "lerobot/tokyo_u_lsmo",
+ "lerobot/utokyo_pr2_opening_fridge",
+ "lerobot/cmu_franka_exploration_dataset",
+ "lerobot/cmu_stretch",
+ "lerobot/asu_table_top",
+ "lerobot/utokyo_pr2_tabletop_manipulation",
+ "lerobot/utokyo_xarm_pick_and_place",
+ "lerobot/ucsd_kitchen_dataset",
+ "lerobot/austin_buds_dataset",
+ "lerobot/dlr_sara_grid_clamp",
+ "lerobot/conq_hose_manipulation",
+ "lerobot/columbia_cairlab_pusht_real",
+ "lerobot/dlr_sara_pour",
+ "lerobot/dlr_edan_shared_control",
+ "lerobot/ucsd_pick_and_place_dataset",
+ "lerobot/berkeley_cable_routing",
+ "lerobot/nyu_franka_play_dataset",
+ "lerobot/austin_sirius_dataset",
+ "lerobot/cmu_play_fusion",
+ "lerobot/berkeley_gnm_sac_son",
+ "lerobot/nyu_door_opening_surprising_effectiveness",
+ "lerobot/berkeley_fanuc_manipulation",
+ "lerobot/jaco_play",
+ "lerobot/viola",
+ "lerobot/kaist_nonprehensile",
+ "lerobot/berkeley_mvp",
+ "lerobot/uiuc_d3field",
+ "lerobot/berkeley_gnm_recon",
+ "lerobot/austin_sailor_dataset",
+ "lerobot/utaustin_mutex",
+ "lerobot/roboturk",
+ "lerobot/stanford_hydra_dataset",
+ "lerobot/berkeley_autolab_ur5",
+ "lerobot/stanford_robocook",
+ "lerobot/toto",
+ "lerobot/fmb",
+ "lerobot/droid_100",
+ "lerobot/berkeley_rpt",
+ "lerobot/stanford_kuka_multimodal_dataset",
+ "lerobot/iamlab_cmu_pickup_insert",
+ "lerobot/taco_play",
+ "lerobot/berkeley_gnm_cory_hall",
+ "lerobot/usc_cloth_sim",
+]
+
+available_datasets = sorted(
+ set(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets))
+)
+
+# lists all available policies from `lerobot/policies`
+available_policies = ["act", "diffusion", "tdmpc", "vqbet"]
+
+# lists all available robots from `lerobot/robot_devices/robots`
+available_robots = [
+ "koch",
+ "koch_bimanual",
+ "aloha",
+ "so100",
+ "so101",
+]
+
+# lists all available cameras from `lerobot/robot_devices/cameras`
+available_cameras = [
+ "opencv",
+ "intelrealsense",
+]
+
+# lists all available motors from `lerobot/robot_devices/motors`
+available_motors = [
+ "dynamixel",
+ "feetech",
+]
+
+# keys and values refer to yaml files
+available_policies_per_env = {
+ "aloha": ["act"],
+ "pusht": ["diffusion", "vqbet"],
+ "xarm": ["tdmpc"],
+ "koch_real": ["act_koch_real"],
+ "aloha_real": ["act_aloha_real"],
+}
+
+env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
+env_dataset_pairs = [
+ (env, dataset) for env, datasets in available_datasets_per_env.items() for dataset in datasets
+]
+env_dataset_policy_triplets = [
+ (env, dataset, policy)
+ for env, datasets in available_datasets_per_env.items()
+ for dataset in datasets
+ for policy in available_policies_per_env[env]
+]
diff --git a/lerobot/__version__.py b/src/lerobot/__version__.py
similarity index 100%
rename from lerobot/__version__.py
rename to src/lerobot/__version__.py
diff --git a/src/lerobot/calibrate.py b/src/lerobot/calibrate.py
new file mode 100644
index 0000000000..1e8bf4751f
--- /dev/null
+++ b/src/lerobot/calibrate.py
@@ -0,0 +1,86 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Helper to recalibrate your device (robot or teleoperator).
+
+Example:
+
+```shell
+python -m lerobot.calibrate \
+ --teleop.type=so100_leader \
+ --teleop.port=/dev/tty.usbmodem58760431551 \
+ --teleop.id=blue
+```
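+
+Calibrating a robot (follower arm) works the same way by passing `--robot.*` arguments
+instead of `--teleop.*`. The robot type, port and id below are illustrative and should be
+replaced with your own:
+
+```shell
+python -m lerobot.calibrate \
+    --robot.type=so101_follower \
+    --robot.port=/dev/tty.usbmodem58760431551 \
+    --robot.id=my_follower_arm
+```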
+"""
+
+import logging
+from dataclasses import asdict, dataclass
+from pprint import pformat
+
+import draccus
+
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401
+from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401
+from lerobot.robots import ( # noqa: F401
+ Robot,
+ RobotConfig,
+ hope_jr,
+ koch_follower,
+ lekiwi,
+ make_robot_from_config,
+ so100_follower,
+ so101_follower,
+)
+from lerobot.teleoperators import ( # noqa: F401
+ Teleoperator,
+ TeleoperatorConfig,
+ homunculus,
+ koch_leader,
+ make_teleoperator_from_config,
+ so100_leader,
+ so101_leader,
+)
+from lerobot.utils.utils import init_logging
+
+
+@dataclass
+class CalibrateConfig:
+ teleop: TeleoperatorConfig | None = None
+ robot: RobotConfig | None = None
+
+ def __post_init__(self):
+ if bool(self.teleop) == bool(self.robot):
+ raise ValueError("Choose either a teleop or a robot.")
+
+ self.device = self.robot if self.robot else self.teleop
+
+
+@draccus.wrap()
+def calibrate(cfg: CalibrateConfig):
+ init_logging()
+ logging.info(pformat(asdict(cfg)))
+
+ if isinstance(cfg.device, RobotConfig):
+ device = make_robot_from_config(cfg.device)
+ elif isinstance(cfg.device, TeleoperatorConfig):
+ device = make_teleoperator_from_config(cfg.device)
+
+ device.connect(calibrate=False)
+ device.calibrate()
+ device.disconnect()
+
+
+if __name__ == "__main__":
+ calibrate()
diff --git a/src/lerobot/cameras/__init__.py b/src/lerobot/cameras/__init__.py
new file mode 100644
index 0000000000..1488cd89ea
--- /dev/null
+++ b/src/lerobot/cameras/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .camera import Camera
+from .configs import CameraConfig, ColorMode, Cv2Rotation
+from .utils import make_cameras_from_configs
diff --git a/src/lerobot/cameras/camera.py b/src/lerobot/cameras/camera.py
new file mode 100644
index 0000000000..e435c7309a
--- /dev/null
+++ b/src/lerobot/cameras/camera.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+from typing import Any
+
+import numpy as np
+
+from .configs import CameraConfig, ColorMode
+
+
+class Camera(abc.ABC):
+ """Base class for camera implementations.
+
+ Defines a standard interface for camera operations across different backends.
+ Subclasses must implement all abstract methods.
+
+ Manages basic camera properties (FPS, resolution) and core operations:
+ - Connection/disconnection
+ - Frame capture (sync/async)
+
+ Attributes:
+ fps (int | None): Configured frames per second
+ width (int | None): Frame width in pixels
+ height (int | None): Frame height in pixels
+
+ Example:
+ class MyCamera(Camera):
+ def __init__(self, config): ...
+ @property
+ def is_connected(self) -> bool: ...
+ def connect(self, warmup=True): ...
+ # Plus other required methods
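+
+        # Typical use of a concrete implementation (sketch; names are illustrative):
+        #   cam = MyCamera(my_config)
+        #   cam.connect()
+        #   frame = cam.read()        # blocking read of one frame (HxWx3 array)
+        #   frame = cam.async_read()  # non-blocking read of the most recent frame
+        #   cam.disconnect()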
+ """
+
+ def __init__(self, config: CameraConfig):
+ """Initialize the camera with the given configuration.
+
+ Args:
+ config: Camera configuration containing FPS and resolution.
+ """
+ self.fps: int | None = config.fps
+ self.width: int | None = config.width
+ self.height: int | None = config.height
+
+ @property
+ @abc.abstractmethod
+ def is_connected(self) -> bool:
+ """Check if the camera is currently connected.
+
+ Returns:
+ bool: True if the camera is connected and ready to capture frames,
+ False otherwise.
+ """
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def find_cameras() -> list[dict[str, Any]]:
+ """Detects available cameras connected to the system.
+ Returns:
+ List[Dict[str, Any]]: A list of dictionaries,
+ where each dictionary contains information about a detected camera.
+ """
+ pass
+
+ @abc.abstractmethod
+ def connect(self, warmup: bool = True) -> None:
+ """Establish connection to the camera.
+
+ Args:
+ warmup: If True (default), captures a warmup frame before returning. Useful
+ for cameras that require time to adjust capture settings.
+ If False, skips the warmup frame.
+ """
+ pass
+
+ @abc.abstractmethod
+ def read(self, color_mode: ColorMode | None = None) -> np.ndarray:
+ """Capture and return a single frame from the camera.
+
+ Args:
+ color_mode: Desired color mode for the output frame. If None,
+ uses the camera's default color mode.
+
+ Returns:
+ np.ndarray: Captured frame as a numpy array.
+ """
+ pass
+
+ @abc.abstractmethod
+ def async_read(self, timeout_ms: float = ...) -> np.ndarray:
+ """Asynchronously capture and return a single frame from the camera.
+
+ Args:
+ timeout_ms: Maximum time to wait for a frame in milliseconds.
+ Defaults to implementation-specific timeout.
+
+ Returns:
+ np.ndarray: Captured frame as a numpy array.
+ """
+ pass
+
+ @abc.abstractmethod
+ def disconnect(self) -> None:
+ """Disconnect from the camera and release resources."""
+ pass
diff --git a/src/lerobot/cameras/configs.py b/src/lerobot/cameras/configs.py
new file mode 100644
index 0000000000..0488a97ffd
--- /dev/null
+++ b/src/lerobot/cameras/configs.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+from dataclasses import dataclass
+from enum import Enum
+
+import draccus
+
+
+class ColorMode(str, Enum):
+ RGB = "rgb"
+ BGR = "bgr"
+
+
+class Cv2Rotation(int, Enum):
+ NO_ROTATION = 0
+ ROTATE_90 = 90
+ ROTATE_180 = 180
+ ROTATE_270 = -90
+
+
+@dataclass(kw_only=True)
+class CameraConfig(draccus.ChoiceRegistry, abc.ABC):
+ fps: int | None = None
+ width: int | None = None
+ height: int | None = None
+
+ @property
+ def type(self) -> str:
+ return self.get_choice_name(self.__class__)
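+
+
+# Illustrative sketch (not part of the library) of how a concrete config plugs into the
+# registry: subclasses register a name with draccus so that `type: <name>` entries in
+# configs/CLI resolve to the right dataclass, and `cfg.type` reports that name back.
+#
+#   @CameraConfig.register_subclass("mycam")
+#   @dataclass
+#   class MyCameraConfig(CameraConfig):
+#       device_id: int = 0
+#
+#   cfg = MyCameraConfig(fps=30, width=640, height=480)
+#   assert cfg.type == "mycam"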
diff --git a/src/lerobot/cameras/opencv/__init__.py b/src/lerobot/cameras/opencv/__init__.py
new file mode 100644
index 0000000000..11d3139fe2
--- /dev/null
+++ b/src/lerobot/cameras/opencv/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .camera_opencv import OpenCVCamera
+from .configuration_opencv import OpenCVCameraConfig
diff --git a/src/lerobot/cameras/opencv/camera_opencv.py b/src/lerobot/cameras/opencv/camera_opencv.py
new file mode 100644
index 0000000000..7ad9988cca
--- /dev/null
+++ b/src/lerobot/cameras/opencv/camera_opencv.py
@@ -0,0 +1,486 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Provides the OpenCVCamera class for capturing frames from cameras using OpenCV.
+"""
+
+import logging
+import math
+import os
+import platform
+import time
+from pathlib import Path
+from threading import Event, Lock, Thread
+from typing import Any
+
+# Fix MSMF hardware transform compatibility for Windows before importing cv2
+if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ:
+ os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0"
+import cv2
+import numpy as np
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+
+from ..camera import Camera
+from ..utils import get_cv2_backend, get_cv2_rotation
+from .configuration_opencv import ColorMode, OpenCVCameraConfig
+
+# NOTE(Steven): The maximum OpenCV device index depends on your operating system. For instance,
+# if you have 3 cameras, they are usually associated with indices 0, 1 and 2 on macOS, whereas
+# on Ubuntu the indices can be arbitrary (e.g. 6, 16, 23).
+# When you change the USB port or reboot the computer, the operating system might
+# treat the same cameras as new devices. Thus we select a higher bound to search indices.
+MAX_OPENCV_INDEX = 60
+
+logger = logging.getLogger(__name__)
+
+
+class OpenCVCamera(Camera):
+ """
+ Manages camera interactions using OpenCV for efficient frame recording.
+
+ This class provides a high-level interface to connect to, configure, and read
+ frames from cameras compatible with OpenCV's VideoCapture. It supports both
+ synchronous and asynchronous frame reading.
+
+ An OpenCVCamera instance requires a camera index (e.g., 0) or a device path
+ (e.g., '/dev/video0' on Linux). Camera indices can be unstable across reboots
+ or port changes, especially on Linux. Use the provided utility script to find
+ available camera indices or paths:
+ ```bash
+ python -m lerobot.find_cameras opencv
+ ```
+
+ The camera's default settings (FPS, resolution, color mode) are used unless
+ overridden in the configuration.
+
+ Example:
+ ```python
+    from lerobot.cameras import ColorMode, Cv2Rotation
+    from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig
+
+ # Basic usage with camera index 0
+ config = OpenCVCameraConfig(index_or_path=0)
+ camera = OpenCVCamera(config)
+ camera.connect()
+
+ # Read 1 frame synchronously
+ color_image = camera.read()
+ print(color_image.shape)
+
+ # Read 1 frame asynchronously
+ async_image = camera.async_read()
+
+ # When done, properly disconnect the camera using
+ camera.disconnect()
+
+ # Example with custom settings
+ custom_config = OpenCVCameraConfig(
+ index_or_path='/dev/video0', # Or use an index
+ fps=30,
+ width=1280,
+ height=720,
+ color_mode=ColorMode.RGB,
+ rotation=Cv2Rotation.ROTATE_90
+ )
+ custom_camera = OpenCVCamera(custom_config)
+ # ... connect, read, disconnect ...
+ ```
+ """
+
+ def __init__(self, config: OpenCVCameraConfig):
+ """
+ Initializes the OpenCVCamera instance.
+
+ Args:
+ config: The configuration settings for the camera.
+ """
+ super().__init__(config)
+
+ self.config = config
+ self.index_or_path = config.index_or_path
+
+ self.fps = config.fps
+ self.color_mode = config.color_mode
+ self.warmup_s = config.warmup_s
+
+ self.videocapture: cv2.VideoCapture | None = None
+
+ self.thread: Thread | None = None
+ self.stop_event: Event | None = None
+ self.frame_lock: Lock = Lock()
+ self.latest_frame: np.ndarray | None = None
+ self.new_frame_event: Event = Event()
+
+ self.rotation: int | None = get_cv2_rotation(config.rotation)
+ self.backend: int = get_cv2_backend()
+
+ if self.height and self.width:
+ self.capture_width, self.capture_height = self.width, self.height
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
+ self.capture_width, self.capture_height = self.height, self.width
+
+ def __str__(self) -> str:
+ return f"{self.__class__.__name__}({self.index_or_path})"
+
+ @property
+ def is_connected(self) -> bool:
+ """Checks if the camera is currently connected and opened."""
+ return isinstance(self.videocapture, cv2.VideoCapture) and self.videocapture.isOpened()
+
+ def connect(self, warmup: bool = True):
+ """
+ Connects to the OpenCV camera specified in the configuration.
+
+ Initializes the OpenCV VideoCapture object, sets desired camera properties
+ (FPS, width, height), and performs initial checks.
+
+ Raises:
+ DeviceAlreadyConnectedError: If the camera is already connected.
+ ConnectionError: If the specified camera index/path is not found or the camera is found but fails to open.
+ RuntimeError: If the camera opens but fails to apply requested FPS/resolution settings.
+ """
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} is already connected.")
+
+ # Use 1 thread for OpenCV operations to avoid potential conflicts or
+ # blocking in multi-threaded applications, especially during data collection.
+ cv2.setNumThreads(1)
+
+ self.videocapture = cv2.VideoCapture(self.index_or_path, self.backend)
+
+ if not self.videocapture.isOpened():
+ self.videocapture.release()
+ self.videocapture = None
+ raise ConnectionError(
+ f"Failed to open {self}."
+ f"Run `python -m lerobot.find_cameras opencv` to find available cameras."
+ )
+
+ self._configure_capture_settings()
+
+ if warmup:
+ start_time = time.time()
+ while time.time() - start_time < self.warmup_s:
+ self.read()
+ time.sleep(0.1)
+
+ logger.info(f"{self} connected.")
+
+ def _configure_capture_settings(self) -> None:
+ """
+ Applies the specified FPS, width, and height settings to the connected camera.
+
+ This method attempts to set the camera properties via OpenCV. It checks if
+ the camera successfully applied the settings and raises an error if not.
+
+        The target values are taken from the instance attributes `self.fps`, `self.width`
+        and `self.height`; any attribute left as None falls back to the camera's default.
+
+ Raises:
+ RuntimeError: If the camera fails to set any of the specified properties
+ to the requested value.
+ DeviceNotConnectedError: If the camera is not connected when attempting
+ to configure settings.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"Cannot configure settings for {self} as it is not connected.")
+
+ if self.fps is None:
+ self.fps = self.videocapture.get(cv2.CAP_PROP_FPS)
+ else:
+ self._validate_fps()
+
+ default_width = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_WIDTH)))
+ default_height = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
+
+ if self.width is None or self.height is None:
+ self.width, self.height = default_width, default_height
+ self.capture_width, self.capture_height = default_width, default_height
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
+ self.width, self.height = default_height, default_width
+ self.capture_width, self.capture_height = default_width, default_height
+ else:
+ self._validate_width_and_height()
+
+ def _validate_fps(self) -> None:
+ """Validates and sets the camera's frames per second (FPS)."""
+
+ success = self.videocapture.set(cv2.CAP_PROP_FPS, float(self.fps))
+ actual_fps = self.videocapture.get(cv2.CAP_PROP_FPS)
+ # Use math.isclose for robust float comparison
+ if not success or not math.isclose(self.fps, actual_fps, rel_tol=1e-3):
+ raise RuntimeError(f"{self} failed to set fps={self.fps} ({actual_fps=}).")
+
+ def _validate_width_and_height(self) -> None:
+ """Validates and sets the camera's frame capture width and height."""
+
+ width_success = self.videocapture.set(cv2.CAP_PROP_FRAME_WIDTH, float(self.capture_width))
+ height_success = self.videocapture.set(cv2.CAP_PROP_FRAME_HEIGHT, float(self.capture_height))
+
+ actual_width = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_WIDTH)))
+ if not width_success or self.capture_width != actual_width:
+ raise RuntimeError(
+ f"{self} failed to set capture_width={self.capture_width} ({actual_width=}, {width_success=})."
+ )
+
+ actual_height = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
+ if not height_success or self.capture_height != actual_height:
+ raise RuntimeError(
+ f"{self} failed to set capture_height={self.capture_height} ({actual_height=}, {height_success=})."
+ )
+
+ @staticmethod
+ def find_cameras() -> list[dict[str, Any]]:
+ """
+ Detects available OpenCV cameras connected to the system.
+
+ On Linux, it scans '/dev/video*' paths. On other systems (like macOS, Windows),
+ it checks indices from 0 up to `MAX_OPENCV_INDEX`.
+
+ Returns:
+ List[Dict[str, Any]]: A list of dictionaries,
+ where each dictionary contains 'type', 'id' (port index or path),
+ and the default profile properties (width, height, fps, format).
+ """
+ found_cameras_info = []
+
+ if platform.system() == "Linux":
+ possible_paths = sorted(Path("/dev").glob("video*"), key=lambda p: p.name)
+ targets_to_scan = [str(p) for p in possible_paths]
+ else:
+ targets_to_scan = list(range(MAX_OPENCV_INDEX))
+
+ for target in targets_to_scan:
+ camera = cv2.VideoCapture(target)
+ if camera.isOpened():
+ default_width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))
+ default_height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ default_fps = camera.get(cv2.CAP_PROP_FPS)
+ default_format = camera.get(cv2.CAP_PROP_FORMAT)
+ camera_info = {
+ "name": f"OpenCV Camera @ {target}",
+ "type": "OpenCV",
+ "id": target,
+ "backend_api": camera.getBackendName(),
+ "default_stream_profile": {
+ "format": default_format,
+ "width": default_width,
+ "height": default_height,
+ "fps": default_fps,
+ },
+ }
+
+ found_cameras_info.append(camera_info)
+ camera.release()
+
+ return found_cameras_info
+
+ def read(self, color_mode: ColorMode | None = None) -> np.ndarray:
+ """
+ Reads a single frame synchronously from the camera.
+
+ This is a blocking call. It waits for the next available frame from the
+ camera hardware via OpenCV.
+
+ Args:
+ color_mode (Optional[ColorMode]): If specified, overrides the default
+ color mode (`self.color_mode`) for this read operation (e.g.,
+ request RGB even if default is BGR).
+
+ Returns:
+ np.ndarray: The captured frame as a NumPy array in the format
+ (height, width, channels), using the specified or default
+ color mode and applying any configured rotation.
+
+ Raises:
+ DeviceNotConnectedError: If the camera is not connected.
+ RuntimeError: If reading the frame from the camera fails or if the
+ received frame dimensions don't match expectations before rotation.
+ ValueError: If an invalid `color_mode` is requested.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ start_time = time.perf_counter()
+
+ ret, frame = self.videocapture.read()
+
+ if not ret or frame is None:
+ raise RuntimeError(f"{self} read failed (status={ret}).")
+
+ processed_frame = self._postprocess_image(frame, color_mode)
+
+ read_duration_ms = (time.perf_counter() - start_time) * 1e3
+ logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")
+
+ return processed_frame
+
+ def _postprocess_image(self, image: np.ndarray, color_mode: ColorMode | None = None) -> np.ndarray:
+ """
+ Applies color conversion, dimension validation, and rotation to a raw frame.
+
+ Args:
+ image (np.ndarray): The raw image frame (expected BGR format from OpenCV).
+ color_mode (Optional[ColorMode]): The target color mode (RGB or BGR). If None,
+ uses the instance's default `self.color_mode`.
+
+ Returns:
+ np.ndarray: The processed image frame.
+
+ Raises:
+ ValueError: If the requested `color_mode` is invalid.
+ RuntimeError: If the raw frame dimensions do not match the configured
+ `width` and `height`.
+ """
+ requested_color_mode = self.color_mode if color_mode is None else color_mode
+
+ if requested_color_mode not in (ColorMode.RGB, ColorMode.BGR):
+ raise ValueError(
+ f"Invalid color mode '{requested_color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}."
+ )
+
+ h, w, c = image.shape
+
+ if h != self.capture_height or w != self.capture_width:
+ raise RuntimeError(
+ f"{self} frame width={w} or height={h} do not match configured width={self.capture_width} or height={self.capture_height}."
+ )
+
+ if c != 3:
+ raise RuntimeError(f"{self} frame channels={c} do not match expected 3 channels (RGB/BGR).")
+
+ processed_image = image
+ if requested_color_mode == ColorMode.RGB:
+ processed_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
+ processed_image = cv2.rotate(processed_image, self.rotation)
+
+ return processed_image
+
+ def _read_loop(self):
+ """
+ Internal loop run by the background thread for asynchronous reading.
+
+ On each iteration:
+ 1. Reads a color frame
+ 2. Stores result in latest_frame (thread-safe)
+ 3. Sets new_frame_event to notify listeners
+
+ Stops on DeviceNotConnectedError, logs other errors and continues.
+ """
+ while not self.stop_event.is_set():
+ try:
+ color_image = self.read()
+
+ with self.frame_lock:
+ self.latest_frame = color_image
+ self.new_frame_event.set()
+
+ except DeviceNotConnectedError:
+ break
+ except Exception as e:
+ logger.warning(f"Error reading frame in background thread for {self}: {e}")
+
+ def _start_read_thread(self) -> None:
+ """Starts or restarts the background read thread if it's not running."""
+ if self.thread is not None and self.thread.is_alive():
+ self.thread.join(timeout=0.1)
+ if self.stop_event is not None:
+ self.stop_event.set()
+
+ self.stop_event = Event()
+ self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop")
+ self.thread.daemon = True
+ self.thread.start()
+
+ def _stop_read_thread(self) -> None:
+ """Signals the background read thread to stop and waits for it to join."""
+ if self.stop_event is not None:
+ self.stop_event.set()
+
+ if self.thread is not None and self.thread.is_alive():
+ self.thread.join(timeout=2.0)
+
+ self.thread = None
+ self.stop_event = None
+
+ def async_read(self, timeout_ms: float = 200) -> np.ndarray:
+ """
+ Reads the latest available frame asynchronously.
+
+ This method retrieves the most recent frame captured by the background
+ read thread. It does not block waiting for the camera hardware directly,
+ but may wait up to timeout_ms for the background thread to provide a frame.
+
+ Args:
+ timeout_ms (float): Maximum time in milliseconds to wait for a frame
+ to become available. Defaults to 200ms (0.2 seconds).
+
+ Returns:
+ np.ndarray: The latest captured frame as a NumPy array in the format
+ (height, width, channels), processed according to configuration.
+
+ Raises:
+ DeviceNotConnectedError: If the camera is not connected.
+ TimeoutError: If no frame becomes available within the specified timeout.
+ RuntimeError: If an unexpected error occurs.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ if self.thread is None or not self.thread.is_alive():
+ self._start_read_thread()
+
+ if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0):
+ thread_alive = self.thread is not None and self.thread.is_alive()
+ raise TimeoutError(
+ f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. "
+ f"Read thread alive: {thread_alive}."
+ )
+
+ with self.frame_lock:
+ frame = self.latest_frame
+ self.new_frame_event.clear()
+
+ if frame is None:
+ raise RuntimeError(f"Internal error: Event set but no frame available for {self}.")
+
+ return frame
+
+ def disconnect(self):
+ """
+ Disconnects from the camera and cleans up resources.
+
+ Stops the background read thread (if running) and releases the OpenCV
+ VideoCapture object.
+
+ Raises:
+ DeviceNotConnectedError: If the camera is already disconnected.
+ """
+ if not self.is_connected and self.thread is None:
+ raise DeviceNotConnectedError(f"{self} not connected.")
+
+ if self.thread is not None:
+ self._stop_read_thread()
+
+ if self.videocapture is not None:
+ self.videocapture.release()
+ self.videocapture = None
+
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/cameras/opencv/configuration_opencv.py b/src/lerobot/cameras/opencv/configuration_opencv.py
new file mode 100644
index 0000000000..3ac92de36e
--- /dev/null
+++ b/src/lerobot/cameras/opencv/configuration_opencv.py
@@ -0,0 +1,73 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+from pathlib import Path
+
+from ..configs import CameraConfig, ColorMode, Cv2Rotation
+
+
+@CameraConfig.register_subclass("opencv")
+@dataclass
+class OpenCVCameraConfig(CameraConfig):
+ """Configuration class for OpenCV-based camera devices or video files.
+
+ This class provides configuration options for cameras accessed through OpenCV,
+ supporting both physical camera devices and video files. It includes settings
+ for resolution, frame rate, color mode, and image rotation.
+
+ Example configurations:
+ ```python
+ # Basic configurations
+    OpenCVCameraConfig(0, fps=30, width=1280, height=720)  # 1280x720 @ 30FPS
+    OpenCVCameraConfig("/dev/video4", fps=60, width=640, height=480)  # 640x480 @ 60FPS
+
+    # Advanced configurations
+    OpenCVCameraConfig(0, fps=30, width=640, height=480, rotation=Cv2Rotation.ROTATE_90)  # With 90° rotation
+ ```
+
+ Attributes:
+ index_or_path: Either an integer representing the camera device index,
+ or a Path object pointing to a video file.
+ fps: Requested frames per second for the color stream.
+ width: Requested frame width in pixels for the color stream.
+ height: Requested frame height in pixels for the color stream.
+ color_mode: Color mode for image output (RGB or BGR). Defaults to RGB.
+ rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation.
+        warmup_s: Time (in seconds) spent reading frames during connect() before returning.
+
+ Note:
+ - Only 3-channel color output (RGB/BGR) is currently supported.
+ """
+
+ index_or_path: int | Path
+ color_mode: ColorMode = ColorMode.RGB
+ rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION
+ warmup_s: int = 1
+
+ def __post_init__(self):
+ if self.color_mode not in (ColorMode.RGB, ColorMode.BGR):
+ raise ValueError(
+ f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided."
+ )
+
+ if self.rotation not in (
+ Cv2Rotation.NO_ROTATION,
+ Cv2Rotation.ROTATE_90,
+ Cv2Rotation.ROTATE_180,
+ Cv2Rotation.ROTATE_270,
+ ):
+ raise ValueError(
+ f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided."
+ )
diff --git a/src/lerobot/cameras/realsense/__init__.py b/src/lerobot/cameras/realsense/__init__.py
new file mode 100644
index 0000000000..67f2f4000d
--- /dev/null
+++ b/src/lerobot/cameras/realsense/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .camera_realsense import RealSenseCamera
+from .configuration_realsense import RealSenseCameraConfig
diff --git a/src/lerobot/cameras/realsense/camera_realsense.py b/src/lerobot/cameras/realsense/camera_realsense.py
new file mode 100644
index 0000000000..74b055fa48
--- /dev/null
+++ b/src/lerobot/cameras/realsense/camera_realsense.py
@@ -0,0 +1,556 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Provides the RealSenseCamera class for capturing frames from Intel RealSense cameras.
+"""
+
+import logging
+import time
+from threading import Event, Lock, Thread
+from typing import Any
+
+import cv2
+import numpy as np
+
+try:
+ import pyrealsense2 as rs
+except Exception as e:
+ logging.info(f"Could not import realsense: {e}")
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+
+from ..camera import Camera
+from ..configs import ColorMode
+from ..utils import get_cv2_rotation
+from .configuration_realsense import RealSenseCameraConfig
+
+logger = logging.getLogger(__name__)
+
+
+class RealSenseCamera(Camera):
+ """
+ Manages interactions with Intel RealSense cameras for frame and depth recording.
+
+ This class provides an interface similar to `OpenCVCamera` but tailored for
+ RealSense devices, leveraging the `pyrealsense2` library. It uses the camera's
+ unique serial number for identification, offering more stability than device
+ indices, especially on Linux. It also supports capturing depth maps alongside
+ color frames.
+
+    Use the provided utility script to list connected RealSense cameras (serial numbers, names) and their default profiles:
+ ```bash
+ python -m lerobot.find_cameras realsense
+ ```
+
+ A `RealSenseCamera` instance requires a configuration object specifying the
+ camera's serial number or a unique device name. If using the name, ensure only
+ one camera with that name is connected.
+
+ The camera's default settings (FPS, resolution, color mode) from the stream
+ profile are used unless overridden in the configuration.
+
+ Example:
+ ```python
+ from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig
+ from lerobot.cameras import ColorMode, Cv2Rotation
+
+ # Basic usage with serial number
+ config = RealSenseCameraConfig(serial_number_or_name="0123456789") # Replace with actual SN
+ camera = RealSenseCamera(config)
+ camera.connect()
+
+ # Read 1 frame synchronously
+ color_image = camera.read()
+ print(color_image.shape)
+
+ # Read 1 frame asynchronously
+ async_image = camera.async_read()
+
+ # When done, properly disconnect the camera using
+ camera.disconnect()
+
+ # Example with depth capture and custom settings
+ custom_config = RealSenseCameraConfig(
+ serial_number_or_name="0123456789", # Replace with actual SN
+ fps=30,
+ width=1280,
+ height=720,
+ color_mode=ColorMode.BGR, # Request BGR output
+ rotation=Cv2Rotation.NO_ROTATION,
+ use_depth=True
+ )
+ depth_camera = RealSenseCamera(custom_config)
+ depth_camera.connect()
+
+ # Read 1 depth frame
+ depth_map = depth_camera.read_depth()
+
+ # Example using a unique camera name
+ name_config = RealSenseCameraConfig(serial_number_or_name="Intel RealSense D435") # If unique
+ name_camera = RealSenseCamera(name_config)
+ # ... connect, read, disconnect ...
+ ```
+ """
+
+ def __init__(self, config: RealSenseCameraConfig):
+ """
+ Initializes the RealSenseCamera instance.
+
+ Args:
+ config: The configuration settings for the camera.
+ """
+
+ super().__init__(config)
+
+ self.config = config
+
+ if config.serial_number_or_name.isdigit():
+ self.serial_number = config.serial_number_or_name
+ else:
+ self.serial_number = self._find_serial_number_from_name(config.serial_number_or_name)
+
+ self.fps = config.fps
+ self.color_mode = config.color_mode
+ self.use_depth = config.use_depth
+ self.warmup_s = config.warmup_s
+
+ self.rs_pipeline: rs.pipeline | None = None
+ self.rs_profile: rs.pipeline_profile | None = None
+
+ self.thread: Thread | None = None
+ self.stop_event: Event | None = None
+ self.frame_lock: Lock = Lock()
+ self.latest_frame: np.ndarray | None = None
+ self.new_frame_event: Event = Event()
+
+ self.rotation: int | None = get_cv2_rotation(config.rotation)
+
+ if self.height and self.width:
+ self.capture_width, self.capture_height = self.width, self.height
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
+ self.capture_width, self.capture_height = self.height, self.width
+
+ def __str__(self) -> str:
+ return f"{self.__class__.__name__}({self.serial_number})"
+
+ @property
+ def is_connected(self) -> bool:
+ """Checks if the camera pipeline is started and streams are active."""
+ return self.rs_pipeline is not None and self.rs_profile is not None
+
+ def connect(self, warmup: bool = True):
+ """
+ Connects to the RealSense camera specified in the configuration.
+
+ Initializes the RealSense pipeline, configures the required streams (color
+ and optionally depth), starts the pipeline, and validates the actual stream settings.
+
+ Raises:
+ DeviceAlreadyConnectedError: If the camera is already connected.
+ ValueError: If the configuration is invalid (e.g., missing serial/name, name not unique).
+ ConnectionError: If the camera is found but fails to start the pipeline or no RealSense devices are detected at all.
+ RuntimeError: If the pipeline starts but fails to apply requested settings.
+ """
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} is already connected.")
+
+ self.rs_pipeline = rs.pipeline()
+ rs_config = rs.config()
+ self._configure_rs_pipeline_config(rs_config)
+
+ try:
+ self.rs_profile = self.rs_pipeline.start(rs_config)
+ except RuntimeError as e:
+ self.rs_profile = None
+ self.rs_pipeline = None
+ raise ConnectionError(
+ f"Failed to open {self}."
+ "Run `python -m lerobot.find_cameras realsense` to find available cameras."
+ ) from e
+
+ self._configure_capture_settings()
+
+ if warmup:
+            # NOTE(Steven): RS cameras need a bit of time to warm up before the first read.
+            # If we don't wait, the first read from the warmup will raise.
+            time.sleep(1)
+ start_time = time.time()
+ while time.time() - start_time < self.warmup_s:
+ self.read()
+ time.sleep(0.1)
+
+ logger.info(f"{self} connected.")
+
+ @staticmethod
+ def find_cameras() -> list[dict[str, Any]]:
+ """
+ Detects available Intel RealSense cameras connected to the system.
+
+ Returns:
+ List[Dict[str, Any]]: A list of dictionaries,
+ where each dictionary contains 'type', 'id' (serial number), 'name',
+ firmware version, USB type, and other available specs, and the default profile properties (width, height, fps, format).
+
+ Raises:
+ OSError: If pyrealsense2 is not installed.
+ ImportError: If pyrealsense2 is not installed.
+ """
+ found_cameras_info = []
+ context = rs.context()
+ devices = context.query_devices()
+
+ for device in devices:
+ camera_info = {
+ "name": device.get_info(rs.camera_info.name),
+ "type": "RealSense",
+ "id": device.get_info(rs.camera_info.serial_number),
+ "firmware_version": device.get_info(rs.camera_info.firmware_version),
+ "usb_type_descriptor": device.get_info(rs.camera_info.usb_type_descriptor),
+ "physical_port": device.get_info(rs.camera_info.physical_port),
+ "product_id": device.get_info(rs.camera_info.product_id),
+ "product_line": device.get_info(rs.camera_info.product_line),
+ }
+
+ # Get stream profiles for each sensor
+ sensors = device.query_sensors()
+ for sensor in sensors:
+ profiles = sensor.get_stream_profiles()
+
+ for profile in profiles:
+ if profile.is_video_stream_profile() and profile.is_default():
+ vprofile = profile.as_video_stream_profile()
+ stream_info = {
+ "stream_type": vprofile.stream_name(),
+ "format": vprofile.format().name,
+ "width": vprofile.width(),
+ "height": vprofile.height(),
+ "fps": vprofile.fps(),
+ }
+ camera_info["default_stream_profile"] = stream_info
+
+ found_cameras_info.append(camera_info)
+
+ return found_cameras_info
+
+ def _find_serial_number_from_name(self, name: str) -> str:
+ """Finds the serial number for a given unique camera name."""
+ camera_infos = self.find_cameras()
+ found_devices = [cam for cam in camera_infos if str(cam["name"]) == name]
+
+ if not found_devices:
+ available_names = [cam["name"] for cam in camera_infos]
+ raise ValueError(
+ f"No RealSense camera found with name '{name}'. Available camera names: {available_names}"
+ )
+
+ if len(found_devices) > 1:
+            serial_numbers = [dev["id"] for dev in found_devices]  # find_cameras() stores the SN under "id"
+ raise ValueError(
+ f"Multiple RealSense cameras found with name '{name}'. "
+ f"Please use a unique serial number instead. Found SNs: {serial_numbers}"
+ )
+
+        serial_number = str(found_devices[0]["id"])
+ return serial_number
+
+ def _configure_rs_pipeline_config(self, rs_config):
+ """Creates and configures the RealSense pipeline configuration object."""
+ rs.config.enable_device(rs_config, self.serial_number)
+
+ if self.width and self.height and self.fps:
+ rs_config.enable_stream(
+ rs.stream.color, self.capture_width, self.capture_height, rs.format.rgb8, self.fps
+ )
+ if self.use_depth:
+ rs_config.enable_stream(
+ rs.stream.depth, self.capture_width, self.capture_height, rs.format.z16, self.fps
+ )
+ else:
+ rs_config.enable_stream(rs.stream.color)
+ if self.use_depth:
+ rs_config.enable_stream(rs.stream.depth)
+
+ def _configure_capture_settings(self) -> None:
+ """Sets fps, width, and height from device stream if not already configured.
+
+ Uses the color stream profile to update unset attributes. Handles rotation by
+ swapping width/height when needed. Original capture dimensions are always stored.
+
+ Raises:
+ DeviceNotConnectedError: If device is not connected.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"Cannot validate settings for {self} as it is not connected.")
+
+ stream = self.rs_profile.get_stream(rs.stream.color).as_video_stream_profile()
+
+ if self.fps is None:
+ self.fps = stream.fps()
+
+ if self.width is None or self.height is None:
+ actual_width = int(round(stream.width()))
+ actual_height = int(round(stream.height()))
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
+ self.width, self.height = actual_height, actual_width
+ self.capture_width, self.capture_height = actual_width, actual_height
+ else:
+ self.width, self.height = actual_width, actual_height
+ self.capture_width, self.capture_height = actual_width, actual_height
+
+ def read_depth(self, timeout_ms: int = 200) -> np.ndarray:
+ """
+ Reads a single frame (depth) synchronously from the camera.
+
+ This is a blocking call. It waits for a coherent set of frames (depth)
+ from the camera hardware via the RealSense pipeline.
+
+ Args:
+ timeout_ms (int): Maximum time in milliseconds to wait for a frame. Defaults to 200ms.
+
+ Returns:
+            np.ndarray: The depth map as a NumPy array (height, width) of type
+                `np.uint16` (raw depth values in millimeters), with any configured rotation applied.
+
+ Raises:
+ DeviceNotConnectedError: If the camera is not connected.
+ RuntimeError: If reading frames from the pipeline fails or frames are invalid.
+ """
+
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+ if not self.use_depth:
+ raise RuntimeError(
+ f"Failed to capture depth frame '.read_depth()'. Depth stream is not enabled for {self}."
+ )
+
+ start_time = time.perf_counter()
+
+ ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms)
+
+ if not ret or frame is None:
+ raise RuntimeError(f"{self} read_depth failed (status={ret}).")
+
+ depth_frame = frame.get_depth_frame()
+ depth_map = np.asanyarray(depth_frame.get_data())
+
+ depth_map_processed = self._postprocess_image(depth_map, depth_frame=True)
+
+ read_duration_ms = (time.perf_counter() - start_time) * 1e3
+ logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")
+
+ return depth_map_processed
+
+ def read(self, color_mode: ColorMode | None = None, timeout_ms: int = 200) -> np.ndarray:
+ """
+ Reads a single frame (color) synchronously from the camera.
+
+ This is a blocking call. It waits for a coherent set of frames (color)
+ from the camera hardware via the RealSense pipeline.
+
+ Args:
+ timeout_ms (int): Maximum time in milliseconds to wait for a frame. Defaults to 200ms.
+
+ Returns:
+ np.ndarray: The captured color frame as a NumPy array
+ (height, width, channels), processed according to `color_mode` and rotation.
+
+ Raises:
+ DeviceNotConnectedError: If the camera is not connected.
+ RuntimeError: If reading frames from the pipeline fails or frames are invalid.
+ ValueError: If an invalid `color_mode` is requested.
+ """
+
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ start_time = time.perf_counter()
+
+ ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms)
+
+ if not ret or frame is None:
+ raise RuntimeError(f"{self} read failed (status={ret}).")
+
+ color_frame = frame.get_color_frame()
+ color_image_raw = np.asanyarray(color_frame.get_data())
+
+ color_image_processed = self._postprocess_image(color_image_raw, color_mode)
+
+ read_duration_ms = (time.perf_counter() - start_time) * 1e3
+ logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")
+
+ return color_image_processed
+
+ def _postprocess_image(
+ self, image: np.ndarray, color_mode: ColorMode | None = None, depth_frame: bool = False
+ ) -> np.ndarray:
+ """
+ Applies color conversion, dimension validation, and rotation to a raw color frame.
+
+ Args:
+ image (np.ndarray): The raw image frame (expected RGB format from RealSense).
+ color_mode (Optional[ColorMode]): The target color mode (RGB or BGR). If None,
+ uses the instance's default `self.color_mode`.
+
+ Returns:
+ np.ndarray: The processed image frame according to `self.color_mode` and `self.rotation`.
+
+ Raises:
+ ValueError: If the requested `color_mode` is invalid.
+ RuntimeError: If the raw frame dimensions do not match the configured
+ `width` and `height`.
+ """
+
+ if color_mode and color_mode not in (ColorMode.RGB, ColorMode.BGR):
+ raise ValueError(
+ f"Invalid requested color mode '{color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}."
+ )
+
+        if depth_frame:
+            h, w = image.shape
+        else:
+            h, w, c = image.shape
+            # Only color frames are expected to have 3 channels; depth frames are single-channel.
+            if c != 3:
+                raise RuntimeError(f"{self} frame channels={c} do not match expected 3 channels (RGB/BGR).")
+
+ if h != self.capture_height or w != self.capture_width:
+ raise RuntimeError(
+ f"{self} frame width={w} or height={h} do not match configured width={self.capture_width} or height={self.capture_height}."
+ )
+
+        requested_color_mode = self.color_mode if color_mode is None else color_mode
+
+        # Depth maps are single-channel uint16 and are returned without color conversion.
+        processed_image = image
+        if not depth_frame and requested_color_mode == ColorMode.BGR:
+            processed_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
+ processed_image = cv2.rotate(processed_image, self.rotation)
+
+ return processed_image
+
+ def _read_loop(self):
+ """
+ Internal loop run by the background thread for asynchronous reading.
+
+ On each iteration:
+ 1. Reads a color frame with 500ms timeout
+ 2. Stores result in latest_frame (thread-safe)
+ 3. Sets new_frame_event to notify listeners
+
+ Stops on DeviceNotConnectedError, logs other errors and continues.
+ """
+ while not self.stop_event.is_set():
+ try:
+ color_image = self.read(timeout_ms=500)
+
+ with self.frame_lock:
+ self.latest_frame = color_image
+ self.new_frame_event.set()
+
+ except DeviceNotConnectedError:
+ break
+ except Exception as e:
+ logger.warning(f"Error reading frame in background thread for {self}: {e}")
+
+ def _start_read_thread(self) -> None:
+ """Starts or restarts the background read thread if it's not running."""
+ if self.thread is not None and self.thread.is_alive():
+ self.thread.join(timeout=0.1)
+ if self.stop_event is not None:
+ self.stop_event.set()
+
+ self.stop_event = Event()
+ self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop")
+ self.thread.daemon = True
+ self.thread.start()
+
+ def _stop_read_thread(self):
+ """Signals the background read thread to stop and waits for it to join."""
+ if self.stop_event is not None:
+ self.stop_event.set()
+
+ if self.thread is not None and self.thread.is_alive():
+ self.thread.join(timeout=2.0)
+
+ self.thread = None
+ self.stop_event = None
+
+ # NOTE(Steven): Missing implementation for depth for now
+ def async_read(self, timeout_ms: float = 200) -> np.ndarray:
+ """
+ Reads the latest available frame data (color) asynchronously.
+
+ This method retrieves the most recent color frame captured by the background
+ read thread. It does not block waiting for the camera hardware directly,
+ but may wait up to timeout_ms for the background thread to provide a frame.
+
+ Args:
+ timeout_ms (float): Maximum time in milliseconds to wait for a frame
+ to become available. Defaults to 200ms (0.2 seconds).
+
+ Returns:
+ np.ndarray:
+ The latest captured frame data (color image), processed according to configuration.
+
+ Raises:
+ DeviceNotConnectedError: If the camera is not connected.
+ TimeoutError: If no frame data becomes available within the specified timeout.
+ RuntimeError: If the background thread died unexpectedly or another error occurs.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ if self.thread is None or not self.thread.is_alive():
+ self._start_read_thread()
+
+ if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0):
+ thread_alive = self.thread is not None and self.thread.is_alive()
+ raise TimeoutError(
+ f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. "
+ f"Read thread alive: {thread_alive}."
+ )
+
+ with self.frame_lock:
+ frame = self.latest_frame
+ self.new_frame_event.clear()
+
+ if frame is None:
+ raise RuntimeError(f"Internal error: Event set but no frame available for {self}.")
+
+ return frame
+
+ def disconnect(self):
+ """
+ Disconnects from the camera, stops the pipeline, and cleans up resources.
+
+ Stops the background read thread (if running) and stops the RealSense pipeline.
+
+ Raises:
+ DeviceNotConnectedError: If the camera is already disconnected (pipeline not running).
+ """
+
+ if not self.is_connected and self.thread is None:
+ raise DeviceNotConnectedError(
+ f"Attempted to disconnect {self}, but it appears already disconnected."
+ )
+
+ if self.thread is not None:
+ self._stop_read_thread()
+
+ if self.rs_pipeline is not None:
+ self.rs_pipeline.stop()
+ self.rs_pipeline = None
+ self.rs_profile = None
+
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/cameras/realsense/configuration_realsense.py b/src/lerobot/cameras/realsense/configuration_realsense.py
new file mode 100644
index 0000000000..36a86876d5
--- /dev/null
+++ b/src/lerobot/cameras/realsense/configuration_realsense.py
@@ -0,0 +1,82 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..configs import CameraConfig, ColorMode, Cv2Rotation
+
+
+@CameraConfig.register_subclass("intelrealsense")
+@dataclass
+class RealSenseCameraConfig(CameraConfig):
+ """Configuration class for Intel RealSense cameras.
+
+ This class provides specialized configuration options for Intel RealSense cameras,
+ including support for depth sensing and device identification via serial number or name.
+
+ Example configurations for Intel RealSense D405:
+ ```python
+ # Basic configurations
+ RealSenseCameraConfig("0123456789", 30, 1280, 720) # 1280x720 @ 30FPS
+ RealSenseCameraConfig("0123456789", 60, 640, 480) # 640x480 @ 60FPS
+
+ # Advanced configurations
+ RealSenseCameraConfig("0123456789", 30, 640, 480, use_depth=True) # With depth sensing
+ RealSenseCameraConfig("0123456789", 30, 640, 480, rotation=Cv2Rotation.ROTATE_90) # With 90° rotation
+ ```
+
+ Attributes:
+ fps: Requested frames per second for the color stream.
+ width: Requested frame width in pixels for the color stream.
+ height: Requested frame height in pixels for the color stream.
+ serial_number_or_name: Unique serial number or human-readable name to identify the camera.
+ color_mode: Color mode for image output (RGB or BGR). Defaults to RGB.
+ use_depth: Whether to enable depth stream. Defaults to False.
+ rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation.
+        warmup_s: Time (in seconds) spent reading frames during connect() before returning.
+
+ Note:
+ - Either name or serial_number must be specified.
+ - Depth stream configuration (if enabled) will use the same FPS as the color stream.
+ - The actual resolution and FPS may be adjusted by the camera to the nearest supported mode.
+ - For `fps`, `width` and `height`, either all of them need to be set, or none of them.
+ """
+
+ serial_number_or_name: str
+ color_mode: ColorMode = ColorMode.RGB
+ use_depth: bool = False
+ rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION
+ warmup_s: int = 1
+
+ def __post_init__(self):
+ if self.color_mode not in (ColorMode.RGB, ColorMode.BGR):
+ raise ValueError(
+ f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided."
+ )
+
+ if self.rotation not in (
+ Cv2Rotation.NO_ROTATION,
+ Cv2Rotation.ROTATE_90,
+ Cv2Rotation.ROTATE_180,
+ Cv2Rotation.ROTATE_270,
+ ):
+ raise ValueError(
+ f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided."
+ )
+
+ values = (self.fps, self.width, self.height)
+ if any(v is not None for v in values) and any(v is None for v in values):
+ raise ValueError(
+ "For `fps`, `width` and `height`, either all of them need to be set, or none of them."
+ )
diff --git a/src/lerobot/cameras/utils.py b/src/lerobot/cameras/utils.py
new file mode 100644
index 0000000000..1eb69840bd
--- /dev/null
+++ b/src/lerobot/cameras/utils.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import platform
+from pathlib import Path
+from typing import TypeAlias
+
+from .camera import Camera
+from .configs import CameraConfig, Cv2Rotation
+
+IndexOrPath: TypeAlias = int | Path
+
+
+def make_cameras_from_configs(camera_configs: dict[str, CameraConfig]) -> dict[str, Camera]:
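+    """Instantiate a Camera object for each named config, keyed by the same names.
+
+    Example (illustrative values):
+        configs = {
+            "front": OpenCVCameraConfig(index_or_path=0, fps=30, width=640, height=480),
+            "wrist": RealSenseCameraConfig(serial_number_or_name="0123456789"),
+        }
+        cameras = make_cameras_from_configs(configs)
+        cameras["front"].connect()
+    """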
+ cameras = {}
+
+ for key, cfg in camera_configs.items():
+ if cfg.type == "opencv":
+ from .opencv import OpenCVCamera
+
+ cameras[key] = OpenCVCamera(cfg)
+
+ elif cfg.type == "intelrealsense":
+ from .realsense.camera_realsense import RealSenseCamera
+
+ cameras[key] = RealSenseCamera(cfg)
+ else:
+ raise ValueError(f"The motor type '{cfg.type}' is not valid.")
+
+ return cameras
+
+
+def get_cv2_rotation(rotation: Cv2Rotation) -> int | None:
+ import cv2
+
+ if rotation == Cv2Rotation.ROTATE_90:
+ return cv2.ROTATE_90_CLOCKWISE
+ elif rotation == Cv2Rotation.ROTATE_180:
+ return cv2.ROTATE_180
+ elif rotation == Cv2Rotation.ROTATE_270:
+ return cv2.ROTATE_90_COUNTERCLOCKWISE
+ else:
+ return None
+
+
+def get_cv2_backend() -> int:
+ import cv2
+
+ if platform.system() == "Windows":
+ return cv2.CAP_MSMF # Use MSMF for Windows instead of AVFOUNDATION
+ # elif platform.system() == "Darwin": # macOS
+ # return cv2.CAP_AVFOUNDATION
+ else: # Linux and others
+ return cv2.CAP_ANY
diff --git a/src/lerobot/configs/default.py b/src/lerobot/configs/default.py
new file mode 100644
index 0000000000..53cfe58e78
--- /dev/null
+++ b/src/lerobot/configs/default.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot import (
+ policies, # noqa: F401
+)
+from lerobot.datasets.transforms import ImageTransformsConfig
+from lerobot.datasets.video_utils import get_safe_default_codec
+
+
+@dataclass
+class DatasetConfig:
+ # You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data
+    # keys common between the datasets are kept. Each dataset gets an additional transform that inserts the
+ # "dataset_index" into the returned item. The index mapping is made according to the order in which the
+ # datasets are provided.
+ repo_id: str
+ # Root directory where the dataset will be stored (e.g. 'dataset/path').
+ root: str | None = None
+ episodes: list[int] | None = None
+ image_transforms: ImageTransformsConfig = field(default_factory=ImageTransformsConfig)
+ revision: str | None = None
+ use_imagenet_stats: bool = True
+ video_backend: str = field(default_factory=get_safe_default_codec)
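+
+    # Example (illustrative):
+    #   DatasetConfig(repo_id="lerobot/pusht", episodes=[0, 1, 2])
+    # loads only the first three episodes of that dataset with default transforms.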
+
+
+@dataclass
+class WandBConfig:
+ enable: bool = False
+ # Set to true to disable saving an artifact despite training.save_checkpoint=True
+ disable_artifact: bool = False
+ project: str = "lerobot"
+ entity: str | None = None
+ notes: str | None = None
+ run_id: str | None = None
+    mode: str | None = None  # Allowed values: 'online', 'offline', 'disabled'. Defaults to 'online'
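+
+    # Example (illustrative): WandBConfig(enable=True, project="lerobot", notes="first run")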
+
+
+@dataclass
+class EvalConfig:
+ n_episodes: int = 50
+ # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv.
+ batch_size: int = 50
+ # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing).
+ use_async_envs: bool = False
+
+ def __post_init__(self):
+ if self.batch_size > self.n_episodes:
+ raise ValueError(
+ "The eval batch size is greater than the number of eval episodes "
+ f"({self.batch_size} > {self.n_episodes}). As a result, {self.batch_size} "
+ f"eval environments will be instantiated, but only {self.n_episodes} will be used. "
+ "This might significantly slow down evaluation. To fix this, you should update your command "
+ f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={self.batch_size}`), "
+ f"or lower the batch size (e.g. `eval.batch_size={self.n_episodes}`)."
+ )
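+
+
+# Illustrative sketch of the constraint enforced in __post_init__ (values are arbitrary):
+#   EvalConfig(n_episodes=50, batch_size=10)   # fine: 10 envs, 50 episodes spread across batches
+#   EvalConfig(n_episodes=10, batch_size=50)   # raises ValueError (batch_size > n_episodes)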
diff --git a/src/lerobot/configs/eval.py b/src/lerobot/configs/eval.py
new file mode 100644
index 0000000000..cfe48cf879
--- /dev/null
+++ b/src/lerobot/configs/eval.py
@@ -0,0 +1,65 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime as dt
+import logging
+from dataclasses import dataclass, field
+from pathlib import Path
+
+from lerobot import envs, policies # noqa: F401
+from lerobot.configs import parser
+from lerobot.configs.default import EvalConfig
+from lerobot.configs.policies import PreTrainedConfig
+
+
+@dataclass
+class EvalPipelineConfig:
+    env: envs.EnvConfig
+    eval: EvalConfig = field(default_factory=EvalConfig)
+    # Either the repo ID of a model hosted on the Hub or a path to a directory containing weights
+    # saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch
+    # (useful for debugging). This argument is mutually exclusive with `--config`.
+    policy: PreTrainedConfig | None = None
+ output_dir: Path | None = None
+ job_name: str | None = None
+ seed: int | None = 1000
+
+ def __post_init__(self):
+        # HACK: We parse the CLI args again here to get the pretrained path, if one was provided.
+ policy_path = parser.get_path_arg("policy")
+ if policy_path:
+ cli_overrides = parser.get_cli_overrides("policy")
+ self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
+ self.policy.pretrained_path = policy_path
+
+ else:
+ logging.warning(
+ "No pretrained path was provided, evaluated policy will be built from scratch (random weights)."
+ )
+
+ if not self.job_name:
+ if self.env is None:
+ self.job_name = f"{self.policy.type}"
+ else:
+ self.job_name = f"{self.env.type}_{self.policy.type}"
+
+ if not self.output_dir:
+ now = dt.datetime.now()
+ eval_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}"
+ self.output_dir = Path("outputs/eval") / eval_dir
+
+ @classmethod
+ def __get_path_fields__(cls) -> list[str]:
+ """This enables the parser to load config from the policy using `--policy.path=local/dir`"""
+ return ["policy"]
diff --git a/src/lerobot/configs/parser.py b/src/lerobot/configs/parser.py
new file mode 100644
index 0000000000..2296eaa20c
--- /dev/null
+++ b/src/lerobot/configs/parser.py
@@ -0,0 +1,230 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+import inspect
+import pkgutil
+import sys
+from argparse import ArgumentError
+from collections.abc import Sequence
+from functools import wraps
+from pathlib import Path
+
+import draccus
+
+from lerobot.utils.utils import has_method
+
+PATH_KEY = "path"
+PLUGIN_DISCOVERY_SUFFIX = "discover_packages_path"
+
+
+def get_cli_overrides(field_name: str, args: Sequence[str] | None = None) -> list[str] | None:
+ """Parses arguments from cli at a given nested attribute level.
+
+ For example, supposing the main script was called with:
+ python myscript.py --arg1=1 --arg2.subarg1=abc --arg2.subarg2=some/path
+
+ If called during execution of myscript.py, get_cli_overrides("arg2") will return:
+ ["--subarg1=abc" "--subarg2=some/path"]
+ """
+ if args is None:
+ args = sys.argv[1:]
+ attr_level_args = []
+ detect_string = f"--{field_name}."
+ exclude_strings = (f"--{field_name}.{draccus.CHOICE_TYPE_KEY}=", f"--{field_name}.{PATH_KEY}=")
+ for arg in args:
+ if arg.startswith(detect_string) and not arg.startswith(exclude_strings):
+ denested_arg = f"--{arg.removeprefix(detect_string)}"
+ attr_level_args.append(denested_arg)
+
+ return attr_level_args
+
+
+def parse_arg(arg_name: str, args: Sequence[str] | None = None) -> str | None:
+ if args is None:
+ args = sys.argv[1:]
+ prefix = f"--{arg_name}="
+ for arg in args:
+ if arg.startswith(prefix):
+ return arg[len(prefix) :]
+ return None
+
+
+def parse_plugin_args(plugin_arg_suffix: str, args: Sequence[str]) -> dict:
+ """Parse plugin-related arguments from command-line arguments.
+
+ This function extracts arguments from command-line arguments that match a specified suffix pattern.
+ It processes arguments in the format '--key=value' and returns them as a dictionary.
+
+ Args:
+ plugin_arg_suffix (str): The suffix to identify plugin-related arguments.
+        args (Sequence[str]): A sequence of command-line arguments to parse.
+
+ Returns:
+ dict: A dictionary containing the parsed plugin arguments where:
+ - Keys are the argument names (with '--' prefix removed if present)
+ - Values are the corresponding argument values
+
+ Example:
+ >>> args = ["--env.discover_packages_path=my_package", "--other_arg=value"]
+ >>> parse_plugin_args("discover_packages_path", args)
+ {'env.discover_packages_path': 'my_package'}
+ """
+ plugin_args = {}
+ for arg in args:
+ if "=" in arg and plugin_arg_suffix in arg:
+ key, value = arg.split("=", 1)
+ # Remove leading '--' if present
+ if key.startswith("--"):
+ key = key[2:]
+ plugin_args[key] = value
+ return plugin_args
+
+
+class PluginLoadError(Exception):
+ """Raised when a plugin fails to load."""
+
+
+def load_plugin(plugin_path: str) -> None:
+ """Load and initialize a plugin from a given Python package path.
+
+ This function attempts to load a plugin by importing its package and any submodules.
+ Plugin registration is expected to happen during package initialization, i.e. when
+    the package is imported, the gym environment should be registered and the config classes
+ registered with their parents using the `register_subclass` decorator.
+
+ Args:
+ plugin_path (str): The Python package path to the plugin (e.g. "mypackage.plugins.myplugin")
+
+ Raises:
+ PluginLoadError: If the plugin cannot be loaded due to import errors or if the package path is invalid.
+
+ Examples:
+ >>> load_plugin("external_plugin.core") # Loads plugin from external package
+
+ Notes:
+ - The plugin package should handle its own registration during import
+ - All submodules in the plugin package will be imported
+ - Implementation follows the plugin discovery pattern from Python packaging guidelines
+
+ See Also:
+ https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/
+ """
+ try:
+ package_module = importlib.import_module(plugin_path, __package__)
+ except (ImportError, ModuleNotFoundError) as e:
+ raise PluginLoadError(
+ f"Failed to load plugin '{plugin_path}'. Verify the path and installation: {str(e)}"
+ ) from e
+
+ def iter_namespace(ns_pkg):
+ return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")
+
+ try:
+ for _finder, pkg_name, _ispkg in iter_namespace(package_module):
+ importlib.import_module(pkg_name)
+ except ImportError as e:
+ raise PluginLoadError(
+ f"Failed to load plugin '{plugin_path}'. Verify the path and installation: {str(e)}"
+ ) from e
+
+
+def get_path_arg(field_name: str, args: Sequence[str] | None = None) -> str | None:
+ return parse_arg(f"{field_name}.{PATH_KEY}", args)
+
+
+def get_type_arg(field_name: str, args: Sequence[str] | None = None) -> str | None:
+ return parse_arg(f"{field_name}.{draccus.CHOICE_TYPE_KEY}", args)
+
+
+def filter_arg(field_to_filter: str, args: Sequence[str] | None = None) -> list[str]:
+ return [arg for arg in args if not arg.startswith(f"--{field_to_filter}=")]
+
+
+def filter_path_args(fields_to_filter: str | list[str], args: Sequence[str] | None = None) -> list[str]:
+ """
+ Filters command-line arguments related to fields with specific path arguments.
+
+ Args:
+ fields_to_filter (str | list[str]): A single str or a list of str whose arguments need to be filtered.
+ args (Sequence[str] | None): The sequence of command-line arguments to be filtered.
+ Defaults to None.
+
+ Returns:
+ list[str]: A filtered list of arguments, with arguments related to the specified
+ fields removed.
+
+ Raises:
+ ArgumentError: If both a path argument (e.g., `--field_name.path`) and a type
+ argument (e.g., `--field_name.type`) are specified for the same field.
+ """
+ if isinstance(fields_to_filter, str):
+ fields_to_filter = [fields_to_filter]
+
+ filtered_args = args
+ for field in fields_to_filter:
+ if get_path_arg(field, args):
+ if get_type_arg(field, args):
+ raise ArgumentError(
+ argument=None,
+ message=f"Cannot specify both --{field}.{PATH_KEY} and --{field}.{draccus.CHOICE_TYPE_KEY}",
+ )
+ filtered_args = [arg for arg in filtered_args if not arg.startswith(f"--{field}.")]
+
+ return filtered_args
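+
+# Worked sketch of the filtering above:
+#   args = ["--policy.path=out/ckpt", "--policy.device=cuda", "--seed=1"]
+#   filter_path_args("policy", args)  # -> ["--seed=1"]
+# Passing both --policy.path and --policy.type for the same field raises ArgumentError instead.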
+
+
+def wrap(config_path: Path | None = None):
+ """
+ HACK: Similar to draccus.wrap but does three additional things:
+ - Will remove '.path' arguments from CLI in order to process them later on.
+ - If a 'config_path' is passed and the main config class has a 'from_pretrained' method, will
+      initialize it from there, allowing configs to be fetched from the hub directly.
+ - Will load plugins specified in the CLI arguments. These plugins will typically register
+ their own subclasses of config classes, so that draccus can find the right class to instantiate
+ from the CLI '.type' arguments
+ """
+
+ def wrapper_outer(fn):
+ @wraps(fn)
+ def wrapper_inner(*args, **kwargs):
+ argspec = inspect.getfullargspec(fn)
+ argtype = argspec.annotations[argspec.args[0]]
+ if len(args) > 0 and type(args[0]) is argtype:
+ cfg = args[0]
+ args = args[1:]
+ else:
+ cli_args = sys.argv[1:]
+ plugin_args = parse_plugin_args(PLUGIN_DISCOVERY_SUFFIX, cli_args)
+ for plugin_cli_arg, plugin_path in plugin_args.items():
+ try:
+ load_plugin(plugin_path)
+ except PluginLoadError as e:
+ # add the relevant CLI arg to the error message
+ raise PluginLoadError(f"{e}\nFailed plugin CLI Arg: {plugin_cli_arg}") from e
+ cli_args = filter_arg(plugin_cli_arg, cli_args)
+ config_path_cli = parse_arg("config_path", cli_args)
+ if has_method(argtype, "__get_path_fields__"):
+ path_fields = argtype.__get_path_fields__()
+ cli_args = filter_path_args(path_fields, cli_args)
+ if has_method(argtype, "from_pretrained") and config_path_cli:
+ cli_args = filter_arg("config_path", cli_args)
+ cfg = argtype.from_pretrained(config_path_cli, cli_args=cli_args)
+ else:
+ cfg = draccus.parse(config_class=argtype, config_path=config_path, args=cli_args)
+ response = fn(cfg, *args, **kwargs)
+ return response
+
+ return wrapper_inner
+
+ return wrapper_outer
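+
+
+# Typical usage sketch (the entry point and config class are illustrative, not mandated here):
+#
+#   @wrap()
+#   def train(cfg: TrainPipelineConfig):
+#       ...
+#
+# Invoking `python train.py --dataset.repo_id=user/my_dataset --policy.type=act` lets draccus build
+# `cfg` from the CLI, while `--policy.path=...`, `config_path` and plugin-discovery arguments are
+# intercepted by this wrapper first.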
diff --git a/src/lerobot/configs/policies.py b/src/lerobot/configs/policies.py
new file mode 100644
index 0000000000..c5b2fa09e3
--- /dev/null
+++ b/src/lerobot/configs/policies.py
@@ -0,0 +1,206 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+import builtins
+import json
+import logging
+import os
+import tempfile
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import TypeVar
+
+import draccus
+from huggingface_hub import hf_hub_download
+from huggingface_hub.constants import CONFIG_NAME
+from huggingface_hub.errors import HfHubHTTPError
+
+from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
+from lerobot.optim.optimizers import OptimizerConfig
+from lerobot.optim.schedulers import LRSchedulerConfig
+from lerobot.utils.hub import HubMixin
+from lerobot.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available
+
+T = TypeVar("T", bound="PreTrainedConfig")
+
+
+@dataclass
+class PreTrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC):
+ """
+ Base configuration class for policy models.
+
+ Args:
+ n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
+ current step and additional steps going back).
+        normalization_mapping: A dictionary mapping feature types (e.g. "STATE", "VISUAL", "ACTION") to
+            the normalization mode to apply to them.
+        input_features: A dictionary describing the policy's input features (name mapped to a
+            PolicyFeature with its type and shape).
+        output_features: A dictionary describing the policy's output features (name mapped to a
+            PolicyFeature with its type and shape).
+ """
+
+ n_obs_steps: int = 1
+ normalization_mapping: dict[str, NormalizationMode] = field(default_factory=dict)
+
+ input_features: dict[str, PolicyFeature] = field(default_factory=dict)
+ output_features: dict[str, PolicyFeature] = field(default_factory=dict)
+
+    device: str | None = None  # cuda | cpu | mps
+ # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
+ # automatic gradient scaling is used.
+ use_amp: bool = False
+
+ push_to_hub: bool = True
+ repo_id: str | None = None
+
+    # Upload to a private repository on the Hugging Face hub.
+ private: bool | None = None
+ # Add tags to your policy on the hub.
+ tags: list[str] | None = None
+    # License under which your policy is published on the hub.
+ license: str | None = None
+
+ def __post_init__(self):
+ self.pretrained_path = None
+ if not self.device or not is_torch_device_available(self.device):
+ auto_device = auto_select_torch_device()
+ logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.")
+ self.device = auto_device.type
+
+ # Automatically deactivate AMP if necessary
+ if self.use_amp and not is_amp_available(self.device):
+ logging.warning(
+ f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP."
+ )
+ self.use_amp = False
+
+ @property
+ def type(self) -> str:
+ return self.get_choice_name(self.__class__)
+
+ @property
+ @abc.abstractmethod
+ def observation_delta_indices(self) -> list | None:
+ raise NotImplementedError
+
+ @property
+ @abc.abstractmethod
+ def action_delta_indices(self) -> list | None:
+ raise NotImplementedError
+
+ @property
+ @abc.abstractmethod
+ def reward_delta_indices(self) -> list | None:
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def get_optimizer_preset(self) -> OptimizerConfig:
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def get_scheduler_preset(self) -> LRSchedulerConfig | None:
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def validate_features(self) -> None:
+ raise NotImplementedError
+
+ @property
+ def robot_state_feature(self) -> PolicyFeature | None:
+ for _, ft in self.input_features.items():
+ if ft.type is FeatureType.STATE:
+ return ft
+ return None
+
+ @property
+ def env_state_feature(self) -> PolicyFeature | None:
+ for _, ft in self.input_features.items():
+ if ft.type is FeatureType.ENV:
+ return ft
+ return None
+
+ @property
+ def image_features(self) -> dict[str, PolicyFeature]:
+ return {key: ft for key, ft in self.input_features.items() if ft.type is FeatureType.VISUAL}
+
+ @property
+ def action_feature(self) -> PolicyFeature | None:
+ for _, ft in self.output_features.items():
+ if ft.type is FeatureType.ACTION:
+ return ft
+ return None
+
+ def _save_pretrained(self, save_directory: Path) -> None:
+ with open(save_directory / CONFIG_NAME, "w") as f, draccus.config_type("json"):
+ draccus.dump(self, f, indent=4)
+
+ @classmethod
+ def from_pretrained(
+ cls: builtins.type[T],
+ pretrained_name_or_path: str | Path,
+ *,
+ force_download: bool = False,
+        resume_download: bool | None = None,
+ proxies: dict | None = None,
+ token: str | bool | None = None,
+ cache_dir: str | Path | None = None,
+ local_files_only: bool = False,
+ revision: str | None = None,
+ **policy_kwargs,
+ ) -> T:
+ model_id = str(pretrained_name_or_path)
+ config_file: str | None = None
+ if Path(model_id).is_dir():
+ if CONFIG_NAME in os.listdir(model_id):
+ config_file = os.path.join(model_id, CONFIG_NAME)
+ else:
+ print(f"{CONFIG_NAME} not found in {Path(model_id).resolve()}")
+ else:
+ try:
+ config_file = hf_hub_download(
+ repo_id=model_id,
+ filename=CONFIG_NAME,
+ revision=revision,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ proxies=proxies,
+ resume_download=resume_download,
+ token=token,
+ local_files_only=local_files_only,
+ )
+ except HfHubHTTPError as e:
+ raise FileNotFoundError(
+ f"{CONFIG_NAME} not found on the HuggingFace Hub in {model_id}"
+ ) from e
+
+ # HACK: Parse the original config to get the config subclass, so that we can
+ # apply cli overrides.
+ # This is very ugly, ideally we'd like to be able to do that natively with draccus
+ # something like --policy.path (in addition to --policy.type)
+ with draccus.config_type("json"):
+ orig_config = draccus.parse(cls, config_file, args=[])
+
+ with open(config_file) as f:
+ config = json.load(f)
+
+ config.pop("type")
+ with tempfile.NamedTemporaryFile("w+") as f:
+ json.dump(config, f)
+ config_file = f.name
+ f.flush()
+
+ cli_overrides = policy_kwargs.pop("cli_overrides", [])
+ with draccus.config_type("json"):
+ return draccus.parse(orig_config.__class__, config_file, args=cli_overrides)
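+
+
+# Illustrative sketch of loading a policy config with CLI overrides (the repo id is a placeholder
+# and the valid override keys depend on the concrete PreTrainedConfig subclass):
+#
+#   cfg = PreTrainedConfig.from_pretrained("user/some_policy_repo", cli_overrides=["--device=cpu"])
+#   cfg.pretrained_path = "user/some_policy_repo"  # as done by the train/eval pipeline configs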
diff --git a/src/lerobot/configs/train.py b/src/lerobot/configs/train.py
new file mode 100644
index 0000000000..60a4d81d52
--- /dev/null
+++ b/src/lerobot/configs/train.py
@@ -0,0 +1,184 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import builtins
+import datetime as dt
+import os
+from dataclasses import dataclass, field
+from pathlib import Path
+
+import draccus
+from huggingface_hub import hf_hub_download
+from huggingface_hub.errors import HfHubHTTPError
+
+from lerobot import envs
+from lerobot.configs import parser
+from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.optim import OptimizerConfig
+from lerobot.optim.schedulers import LRSchedulerConfig
+from lerobot.utils.hub import HubMixin
+
+TRAIN_CONFIG_NAME = "train_config.json"
+
+
+@dataclass
+class TrainPipelineConfig(HubMixin):
+ dataset: DatasetConfig
+ env: envs.EnvConfig | None = None
+ policy: PreTrainedConfig | None = None
+    # Set `output_dir` to where you would like to save all of the run outputs. If you run another training
+    # session with the same value for `output_dir`, its contents will be overwritten unless you set `resume` to true.
+ output_dir: Path | None = None
+ job_name: str | None = None
+ # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure
+    # `output_dir` is the directory of an existing run with at least one checkpoint in it.
+ # Note that when resuming a run, the default behavior is to use the configuration from the checkpoint,
+ # regardless of what's provided with the training command at the time of resumption.
+ resume: bool = False
+    # `seed` is used for training (e.g. model initialization, dataset shuffling)
+ # AND for the evaluation environments.
+ seed: int | None = 1000
+ # Number of workers for the dataloader.
+ num_workers: int = 4
+ batch_size: int = 8
+ steps: int = 100_000
+ eval_freq: int = 20_000
+ log_freq: int = 200
+ save_checkpoint: bool = True
+ # Checkpoint is saved every `save_freq` training iterations and after the last training step.
+ save_freq: int = 20_000
+ use_policy_training_preset: bool = True
+ optimizer: OptimizerConfig | None = None
+ scheduler: LRSchedulerConfig | None = None
+ eval: EvalConfig = field(default_factory=EvalConfig)
+ wandb: WandBConfig = field(default_factory=WandBConfig)
+
+ def __post_init__(self):
+ self.checkpoint_path = None
+
+ def validate(self):
+        # HACK: We parse the CLI args again here to get the pretrained path, if one was provided.
+ policy_path = parser.get_path_arg("policy")
+ if policy_path:
+ # Only load the policy config
+ cli_overrides = parser.get_cli_overrides("policy")
+ self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
+ self.policy.pretrained_path = policy_path
+ elif self.resume:
+ # The entire train config is already loaded, we just need to get the checkpoint dir
+ config_path = parser.parse_arg("config_path")
+ if not config_path:
+ raise ValueError(
+ f"A config_path is expected when resuming a run. Please specify path to {TRAIN_CONFIG_NAME}"
+ )
+ if not Path(config_path).resolve().exists():
+ raise NotADirectoryError(
+ f"{config_path=} is expected to be a local path. "
+ "Resuming from the hub is not supported for now."
+ )
+ policy_path = Path(config_path).parent
+ self.policy.pretrained_path = policy_path
+ self.checkpoint_path = policy_path.parent
+
+ if not self.job_name:
+ if self.env is None:
+ self.job_name = f"{self.policy.type}"
+ else:
+ self.job_name = f"{self.env.type}_{self.policy.type}"
+
+ if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir():
+ raise FileExistsError(
+ f"Output directory {self.output_dir} already exists and resume is {self.resume}. "
+ f"Please change your output directory so that {self.output_dir} is not overwritten."
+ )
+ elif not self.output_dir:
+ now = dt.datetime.now()
+ train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}"
+ self.output_dir = Path("outputs/train") / train_dir
+
+ if isinstance(self.dataset.repo_id, list):
+ raise NotImplementedError("LeRobotMultiDataset is not currently implemented.")
+
+ if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None):
+ raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.")
+ elif self.use_policy_training_preset and not self.resume:
+ self.optimizer = self.policy.get_optimizer_preset()
+ self.scheduler = self.policy.get_scheduler_preset()
+
+ if self.policy.push_to_hub and not self.policy.repo_id:
+ raise ValueError(
+ "'policy.repo_id' argument missing. Please specify it to push the model to the hub."
+ )
+
+ @classmethod
+ def __get_path_fields__(cls) -> list[str]:
+ """This enables the parser to load config from the policy using `--policy.path=local/dir`"""
+ return ["policy"]
+
+ def to_dict(self) -> dict:
+ return draccus.encode(self)
+
+ def _save_pretrained(self, save_directory: Path) -> None:
+ with open(save_directory / TRAIN_CONFIG_NAME, "w") as f, draccus.config_type("json"):
+ draccus.dump(self, f, indent=4)
+
+ @classmethod
+ def from_pretrained(
+ cls: builtins.type["TrainPipelineConfig"],
+ pretrained_name_or_path: str | Path,
+ *,
+ force_download: bool = False,
+        resume_download: bool | None = None,
+ proxies: dict | None = None,
+ token: str | bool | None = None,
+ cache_dir: str | Path | None = None,
+ local_files_only: bool = False,
+ revision: str | None = None,
+ **kwargs,
+ ) -> "TrainPipelineConfig":
+ model_id = str(pretrained_name_or_path)
+ config_file: str | None = None
+ if Path(model_id).is_dir():
+ if TRAIN_CONFIG_NAME in os.listdir(model_id):
+ config_file = os.path.join(model_id, TRAIN_CONFIG_NAME)
+ else:
+ print(f"{TRAIN_CONFIG_NAME} not found in {Path(model_id).resolve()}")
+ elif Path(model_id).is_file():
+ config_file = model_id
+ else:
+ try:
+ config_file = hf_hub_download(
+ repo_id=model_id,
+ filename=TRAIN_CONFIG_NAME,
+ revision=revision,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ proxies=proxies,
+ resume_download=resume_download,
+ token=token,
+ local_files_only=local_files_only,
+ )
+ except HfHubHTTPError as e:
+ raise FileNotFoundError(
+ f"{TRAIN_CONFIG_NAME} not found on the HuggingFace Hub in {model_id}"
+ ) from e
+
+ cli_args = kwargs.pop("cli_args", [])
+ with draccus.config_type("json"):
+ return draccus.parse(cls, config_file, args=cli_args)
+
+
+@dataclass(kw_only=True)
+class TrainRLServerPipelineConfig(TrainPipelineConfig):
+ dataset: DatasetConfig | None = None # NOTE: In RL, we don't need an offline dataset
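+
+
+# Illustrative CLI sketch (the module path and repo ids are assumptions, not fixed by this file):
+#
+#   python -m lerobot.scripts.train \
+#       --dataset.repo_id=user/my_dataset \
+#       --policy.type=act \
+#       --policy.push_to_hub=false \
+#       --output_dir=outputs/train/act_my_dataset
+#
+# To resume, point `--config_path` at the checkpointed train config, e.g.
+# `<run_dir>/checkpoints/last/pretrained_model/train_config.json`, and pass `--resume=true`.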
diff --git a/src/lerobot/configs/types.py b/src/lerobot/configs/types.py
new file mode 100644
index 0000000000..6040ff70ba
--- /dev/null
+++ b/src/lerobot/configs/types.py
@@ -0,0 +1,42 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Note: We subclass str so that serialization is straightforward
+# https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json
+from dataclasses import dataclass
+from enum import Enum
+from typing import Any, Protocol
+
+
+class FeatureType(str, Enum):
+ STATE = "STATE"
+ VISUAL = "VISUAL"
+ ENV = "ENV"
+ ACTION = "ACTION"
+ REWARD = "REWARD"
+
+
+class NormalizationMode(str, Enum):
+ MIN_MAX = "MIN_MAX"
+ MEAN_STD = "MEAN_STD"
+ IDENTITY = "IDENTITY"
+
+
+class DictLike(Protocol):
+ def __getitem__(self, key: Any) -> Any: ...
+
+
+@dataclass
+class PolicyFeature:
+ type: FeatureType
+ shape: tuple
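+
+
+# Illustrative sketch of how these types are typically combined in a policy config
+# (keys and shapes are examples only):
+#
+#   input_features = {
+#       "observation.state": PolicyFeature(type=FeatureType.STATE, shape=(6,)),
+#       "observation.images.top": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 480, 640)),
+#   }
+#   normalization_mapping = {"STATE": NormalizationMode.MEAN_STD, "VISUAL": NormalizationMode.MEAN_STD}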
diff --git a/src/lerobot/constants.py b/src/lerobot/constants.py
new file mode 100644
index 0000000000..30777239ef
--- /dev/null
+++ b/src/lerobot/constants.py
@@ -0,0 +1,54 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# keys
+import os
+from pathlib import Path
+
+from huggingface_hub.constants import HF_HOME
+
+OBS_ENV_STATE = "observation.environment_state"
+OBS_STATE = "observation.state"
+OBS_IMAGE = "observation.image"
+OBS_IMAGES = "observation.images"
+ACTION = "action"
+REWARD = "next.reward"
+
+ROBOTS = "robots"
+ROBOT_TYPE = "robot_type"
+TELEOPERATORS = "teleoperators"
+
+# files & directories
+CHECKPOINTS_DIR = "checkpoints"
+LAST_CHECKPOINT_LINK = "last"
+PRETRAINED_MODEL_DIR = "pretrained_model"
+TRAINING_STATE_DIR = "training_state"
+RNG_STATE = "rng_state.safetensors"
+TRAINING_STEP = "training_step.json"
+OPTIMIZER_STATE = "optimizer_state.safetensors"
+OPTIMIZER_PARAM_GROUPS = "optimizer_param_groups.json"
+SCHEDULER_STATE = "scheduler_state.json"
+
+if "LEROBOT_HOME" in os.environ:
+ raise ValueError(
+ f"You have a 'LEROBOT_HOME' environment variable set to '{os.getenv('LEROBOT_HOME')}'.\n"
+ "'LEROBOT_HOME' is deprecated, please use 'HF_LEROBOT_HOME' instead."
+ )
+
+# cache dir
+default_cache_path = Path(HF_HOME) / "lerobot"
+HF_LEROBOT_HOME = Path(os.getenv("HF_LEROBOT_HOME", default_cache_path)).expanduser()
+
+# calibration dir
+default_calibration_path = HF_LEROBOT_HOME / "calibration"
+HF_LEROBOT_CALIBRATION = Path(os.getenv("HF_LEROBOT_CALIBRATION", default_calibration_path)).expanduser()
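+
+# Illustrative resolution with default settings (HF_HOME itself falls back to ~/.cache/huggingface):
+#   HF_LEROBOT_HOME        -> ~/.cache/huggingface/lerobot
+#   HF_LEROBOT_CALIBRATION -> ~/.cache/huggingface/lerobot/calibration
+# Both can be overridden through the environment variables of the same name.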
diff --git a/src/lerobot/datasets/backward_compatibility.py b/src/lerobot/datasets/backward_compatibility.py
new file mode 100644
index 0000000000..fae4850581
--- /dev/null
+++ b/src/lerobot/datasets/backward_compatibility.py
@@ -0,0 +1,68 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import packaging.version
+
+V2_MESSAGE = """
+The dataset you requested ({repo_id}) is in {version} format.
+
+We introduced a new format since v2.0 which is not backward compatible with v1.x.
+Please, use our conversion script. Modify the following command with your own task description:
+```
+python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \\
+ --repo-id {repo_id} \\
+ --single-task "TASK DESCRIPTION." # <---- /!\\ Replace TASK DESCRIPTION /!\\
+```
+
+A few examples to replace TASK DESCRIPTION: "Pick up the blue cube and place it into the bin.", "Insert the
+peg into the socket.", "Slide open the ziploc bag.", "Take the elevator to the 1st floor.", "Open the top
+cabinet, store the pot inside it then close the cabinet.", "Push the T-shaped block onto the T-shaped
+target.", "Grab the spray paint on the shelf and place it in the bin on top of the robot dog.", "Fold the
+sweatshirt.", ...
+
+If you encounter a problem, contact LeRobot maintainers on [Discord](https://discord.com/invite/s3KuuzsPFb)
+or open an [issue on GitHub](https://github.com/huggingface/lerobot/issues/new/choose).
+"""
+
+V21_MESSAGE = """
+The dataset you requested ({repo_id}) is in {version} format.
+While the current version of LeRobot is backward-compatible with it, your dataset still uses global
+stats instead of per-episode stats. Update your dataset stats to the new format using this command:
+```
+python -m lerobot.datasets.v21.convert_dataset_v20_to_v21 --repo-id={repo_id}
+```
+
+If you encounter a problem, contact LeRobot maintainers on [Discord](https://discord.com/invite/s3KuuzsPFb)
+or open an [issue on GitHub](https://github.com/huggingface/lerobot/issues/new/choose).
+"""
+
+FUTURE_MESSAGE = """
+The dataset you requested ({repo_id}) is only available in {version} format.
+As we cannot ensure forward compatibility with it, please update your current version of lerobot.
+"""
+
+
+class CompatibilityError(Exception): ...
+
+
+class BackwardCompatibilityError(CompatibilityError):
+ def __init__(self, repo_id: str, version: packaging.version.Version):
+ message = V2_MESSAGE.format(repo_id=repo_id, version=version)
+ super().__init__(message)
+
+
+class ForwardCompatibilityError(CompatibilityError):
+ def __init__(self, repo_id: str, version: packaging.version.Version):
+ message = FUTURE_MESSAGE.format(repo_id=repo_id, version=version)
+ super().__init__(message)
diff --git a/lerobot/common/datasets/card_template.md b/src/lerobot/datasets/card_template.md
similarity index 95%
rename from lerobot/common/datasets/card_template.md
rename to src/lerobot/datasets/card_template.md
index 7ee27df95d..ee26a78f57 100644
--- a/lerobot/common/datasets/card_template.md
+++ b/src/lerobot/datasets/card_template.md
@@ -1,7 +1,8 @@
---
# For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1
# Doc / guide: https://huggingface.co/docs/hub/datasets-cards
-{{ card_data }}
+# prettier-ignore
+{{card_data}}
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
diff --git a/src/lerobot/datasets/compute_stats.py b/src/lerobot/datasets/compute_stats.py
new file mode 100644
index 0000000000..bfe7b18b4e
--- /dev/null
+++ b/src/lerobot/datasets/compute_stats.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+
+from lerobot.datasets.utils import load_image_as_numpy
+
+
+def estimate_num_samples(
+ dataset_len: int, min_num_samples: int = 100, max_num_samples: int = 10_000, power: float = 0.75
+) -> int:
+ """Heuristic to estimate the number of samples based on dataset size.
+ The power controls the sample growth relative to dataset size.
+    Lower the power for fewer samples.
+
+ For default arguments, we have:
+ - from 1 to ~500, num_samples=100
+ - at 1000, num_samples=177
+ - at 2000, num_samples=299
+ - at 5000, num_samples=594
+ - at 10000, num_samples=1000
+ - at 20000, num_samples=1681
+ """
+ if dataset_len < min_num_samples:
+ min_num_samples = dataset_len
+ return max(min_num_samples, min(int(dataset_len**power), max_num_samples))
+
+
+def sample_indices(data_len: int) -> list[int]:
+ num_samples = estimate_num_samples(data_len)
+ return np.round(np.linspace(0, data_len - 1, num_samples)).astype(int).tolist()
+
+
+def auto_downsample_height_width(img: np.ndarray, target_size: int = 150, max_size_threshold: int = 300):
+ _, height, width = img.shape
+
+ if max(width, height) < max_size_threshold:
+ # no downsampling needed
+ return img
+
+ downsample_factor = int(width / target_size) if width > height else int(height / target_size)
+ return img[:, ::downsample_factor, ::downsample_factor]
+
+
+def sample_images(image_paths: list[str]) -> np.ndarray:
+ sampled_indices = sample_indices(len(image_paths))
+
+ images = None
+ for i, idx in enumerate(sampled_indices):
+ path = image_paths[idx]
+ # we load as uint8 to reduce memory usage
+ img = load_image_as_numpy(path, dtype=np.uint8, channel_first=True)
+ img = auto_downsample_height_width(img)
+
+ if images is None:
+ images = np.empty((len(sampled_indices), *img.shape), dtype=np.uint8)
+
+ images[i] = img
+
+ return images
+
+
+def get_feature_stats(array: np.ndarray, axis: tuple, keepdims: bool) -> dict[str, np.ndarray]:
+ return {
+ "min": np.min(array, axis=axis, keepdims=keepdims),
+ "max": np.max(array, axis=axis, keepdims=keepdims),
+ "mean": np.mean(array, axis=axis, keepdims=keepdims),
+ "std": np.std(array, axis=axis, keepdims=keepdims),
+ "count": np.array([len(array)]),
+ }
+
+
+def compute_episode_stats(episode_data: dict[str, list[str] | np.ndarray], features: dict) -> dict:
+ ep_stats = {}
+ for key, data in episode_data.items():
+ if features[key]["dtype"] == "string":
+ continue # HACK: we should receive np.arrays of strings
+ elif features[key]["dtype"] in ["image", "video"]:
+ ep_ft_array = sample_images(data) # data is a list of image paths
+ axes_to_reduce = (0, 2, 3) # keep channel dim
+ keepdims = True
+ else:
+ ep_ft_array = data # data is already a np.ndarray
+ axes_to_reduce = 0 # compute stats over the first axis
+ keepdims = data.ndim == 1 # keep as np.array
+
+ ep_stats[key] = get_feature_stats(ep_ft_array, axis=axes_to_reduce, keepdims=keepdims)
+
+ # finally, we normalize and remove batch dim for images
+ if features[key]["dtype"] in ["image", "video"]:
+ ep_stats[key] = {
+ k: v if k == "count" else np.squeeze(v / 255.0, axis=0) for k, v in ep_stats[key].items()
+ }
+
+ return ep_stats
+
+
+def _assert_type_and_shape(stats_list: list[dict[str, dict]]):
+ for i in range(len(stats_list)):
+ for fkey in stats_list[i]:
+ for k, v in stats_list[i][fkey].items():
+ if not isinstance(v, np.ndarray):
+ raise ValueError(
+ f"Stats must be composed of numpy array, but key '{k}' of feature '{fkey}' is of type '{type(v)}' instead."
+ )
+ if v.ndim == 0:
+ raise ValueError("Number of dimensions must be at least 1, and is 0 instead.")
+ if k == "count" and v.shape != (1,):
+ raise ValueError(f"Shape of 'count' must be (1), but is {v.shape} instead.")
+ if "image" in fkey and k != "count" and v.shape != (3, 1, 1):
+ raise ValueError(f"Shape of '{k}' must be (3,1,1), but is {v.shape} instead.")
+
+
+def aggregate_feature_stats(stats_ft_list: list[dict[str, dict]]) -> dict[str, dict[str, np.ndarray]]:
+ """Aggregates stats for a single feature."""
+ means = np.stack([s["mean"] for s in stats_ft_list])
+ variances = np.stack([s["std"] ** 2 for s in stats_ft_list])
+ counts = np.stack([s["count"] for s in stats_ft_list])
+ total_count = counts.sum(axis=0)
+
+ # Prepare weighted mean by matching number of dimensions
+ while counts.ndim < means.ndim:
+ counts = np.expand_dims(counts, axis=-1)
+
+ # Compute the weighted mean
+ weighted_means = means * counts
+ total_mean = weighted_means.sum(axis=0) / total_count
+
+ # Compute the variance using the parallel algorithm
+ delta_means = means - total_mean
+ weighted_variances = (variances + delta_means**2) * counts
+ total_variance = weighted_variances.sum(axis=0) / total_count
+
+ return {
+ "min": np.min(np.stack([s["min"] for s in stats_ft_list]), axis=0),
+ "max": np.max(np.stack([s["max"] for s in stats_ft_list]), axis=0),
+ "mean": total_mean,
+ "std": np.sqrt(total_variance),
+ "count": total_count,
+ }
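+
+# Worked sketch of the mean/variance combination above, for a 1-d feature aggregated over
+# two episodes (numbers are arbitrary):
+#   ep0: mean=1.0, std=0.0, count=10    ep1: mean=3.0, std=0.0, count=30
+#   total_mean = (1.0*10 + 3.0*30) / 40 = 2.5
+#   total_var  = ((0.0 + (1.0-2.5)**2)*10 + (0.0 + (3.0-2.5)**2)*30) / 40 = 0.75  # std ~= 0.866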
+
+
+def aggregate_stats(stats_list: list[dict[str, dict]]) -> dict[str, dict[str, np.ndarray]]:
+ """Aggregate stats from multiple compute_stats outputs into a single set of stats.
+
+ The final stats will have the union of all data keys from each of the stats dicts.
+
+ For instance:
+ - new_min = min(min_dataset_0, min_dataset_1, ...)
+ - new_max = max(max_dataset_0, max_dataset_1, ...)
+ - new_mean = (mean of all data, weighted by counts)
+ - new_std = (std of all data)
+ """
+
+ _assert_type_and_shape(stats_list)
+
+ data_keys = {key for stats in stats_list for key in stats}
+ aggregated_stats = {key: {} for key in data_keys}
+
+ for key in data_keys:
+ stats_with_key = [stats[key] for stats in stats_list if key in stats]
+ aggregated_stats[key] = aggregate_feature_stats(stats_with_key)
+
+ return aggregated_stats
diff --git a/src/lerobot/datasets/factory.py b/src/lerobot/datasets/factory.py
new file mode 100644
index 0000000000..e06650bc9e
--- /dev/null
+++ b/src/lerobot/datasets/factory.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from pprint import pformat
+
+import torch
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.train import TrainPipelineConfig
+from lerobot.datasets.lerobot_dataset import (
+ LeRobotDataset,
+ LeRobotDatasetMetadata,
+ MultiLeRobotDataset,
+)
+from lerobot.datasets.transforms import ImageTransforms
+
+IMAGENET_STATS = {
+ "mean": [[[0.485]], [[0.456]], [[0.406]]], # (c,1,1)
+ "std": [[[0.229]], [[0.224]], [[0.225]]], # (c,1,1)
+}
+
+
+def resolve_delta_timestamps(
+ cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata
+) -> dict[str, list] | None:
+ """Resolves delta_timestamps by reading from the 'delta_indices' properties of the PreTrainedConfig.
+
+ Args:
+ cfg (PreTrainedConfig): The PreTrainedConfig to read delta_indices from.
+ ds_meta (LeRobotDatasetMetadata): The dataset from which features and fps are used to build
+ delta_timestamps against.
+
+ Returns:
+ dict[str, list] | None: A dictionary of delta_timestamps, e.g.:
+ {
+ "observation.state": [-0.04, -0.02, 0]
+ "observation.action": [-0.02, 0, 0.02]
+ }
+ returns `None` if the resulting dict is empty.
+ """
+ delta_timestamps = {}
+ for key in ds_meta.features:
+ if key == "next.reward" and cfg.reward_delta_indices is not None:
+ delta_timestamps[key] = [i / ds_meta.fps for i in cfg.reward_delta_indices]
+ if key == "action" and cfg.action_delta_indices is not None:
+ delta_timestamps[key] = [i / ds_meta.fps for i in cfg.action_delta_indices]
+ if key.startswith("observation.") and cfg.observation_delta_indices is not None:
+ delta_timestamps[key] = [i / ds_meta.fps for i in cfg.observation_delta_indices]
+
+ if len(delta_timestamps) == 0:
+ delta_timestamps = None
+
+ return delta_timestamps
+
+
+def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDataset:
+ """Handles the logic of setting up delta timestamps and image transforms before creating a dataset.
+
+ Args:
+ cfg (TrainPipelineConfig): A TrainPipelineConfig config which contains a DatasetConfig and a PreTrainedConfig.
+
+ Raises:
+ NotImplementedError: The MultiLeRobotDataset is currently deactivated.
+
+ Returns:
+ LeRobotDataset | MultiLeRobotDataset
+ """
+ image_transforms = (
+ ImageTransforms(cfg.dataset.image_transforms) if cfg.dataset.image_transforms.enable else None
+ )
+
+ if isinstance(cfg.dataset.repo_id, str):
+ ds_meta = LeRobotDatasetMetadata(
+ cfg.dataset.repo_id, root=cfg.dataset.root, revision=cfg.dataset.revision
+ )
+ delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta)
+ dataset = LeRobotDataset(
+ cfg.dataset.repo_id,
+ root=cfg.dataset.root,
+ episodes=cfg.dataset.episodes,
+ delta_timestamps=delta_timestamps,
+ image_transforms=image_transforms,
+ revision=cfg.dataset.revision,
+ video_backend=cfg.dataset.video_backend,
+ )
+ else:
+ raise NotImplementedError("The MultiLeRobotDataset isn't supported for now.")
+ dataset = MultiLeRobotDataset(
+ cfg.dataset.repo_id,
+ # TODO(aliberts): add proper support for multi dataset
+ # delta_timestamps=delta_timestamps,
+ image_transforms=image_transforms,
+ video_backend=cfg.dataset.video_backend,
+ )
+ logging.info(
+ "Multiple datasets were provided. Applied the following index mapping to the provided datasets: "
+ f"{pformat(dataset.repo_id_to_index, indent=2)}"
+ )
+
+ if cfg.dataset.use_imagenet_stats:
+ for key in dataset.meta.camera_keys:
+ for stats_type, stats in IMAGENET_STATS.items():
+ dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32)
+
+ return dataset
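+
+
+# Minimal usage sketch (field values are placeholders; `cfg.policy` must be a concrete
+# PreTrainedConfig so that delta timestamps can be resolved from its delta indices):
+#
+#   cfg = TrainPipelineConfig(dataset=DatasetConfig(repo_id="user/my_dataset"), policy=some_policy_cfg)
+#   dataset = make_dataset(cfg)
+#   item = dataset[0]  # a dict keyed by feature name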
diff --git a/lerobot/common/datasets/image_writer.py b/src/lerobot/datasets/image_writer.py
similarity index 82%
rename from lerobot/common/datasets/image_writer.py
rename to src/lerobot/datasets/image_writer.py
index 85dd6830bc..4a4e1ab058 100644
--- a/lerobot/common/datasets/image_writer.py
+++ b/src/lerobot/datasets/image_writer.py
@@ -38,22 +38,40 @@ def wrapper(*args, **kwargs):
return wrapper
-def image_array_to_image(image_array: np.ndarray) -> PIL.Image.Image:
+def image_array_to_pil_image(image_array: np.ndarray, range_check: bool = True) -> PIL.Image.Image:
# TODO(aliberts): handle 1 channel and 4 for depth images
- if image_array.ndim == 3 and image_array.shape[0] in [1, 3]:
+ if image_array.ndim != 3:
+ raise ValueError(f"The array has {image_array.ndim} dimensions, but 3 is expected for an image.")
+
+ if image_array.shape[0] == 3:
# Transpose from pytorch convention (C, H, W) to (H, W, C)
image_array = image_array.transpose(1, 2, 0)
+
+ elif image_array.shape[-1] != 3:
+ raise NotImplementedError(
+ f"The image has {image_array.shape[-1]} channels, but 3 is required for now."
+ )
+
if image_array.dtype != np.uint8:
- # Assume the image is in [0, 1] range for floating-point data
- image_array = np.clip(image_array, 0, 1)
+ if range_check:
+ max_ = image_array.max().item()
+ min_ = image_array.min().item()
+ if max_ > 1.0 or min_ < 0.0:
+ raise ValueError(
+ "The image data type is float, which requires values in the range [0.0, 1.0]. "
+ f"However, the provided range is [{min_}, {max_}]. Please adjust the range or "
+ "provide a uint8 image with values in the range [0, 255]."
+ )
+
image_array = (image_array * 255).astype(np.uint8)
+
return PIL.Image.fromarray(image_array)
def write_image(image: np.ndarray | PIL.Image.Image, fpath: Path):
try:
if isinstance(image, np.ndarray):
- img = image_array_to_image(image)
+ img = image_array_to_pil_image(image)
elif isinstance(image, PIL.Image.Image):
img = image
else:
@@ -88,7 +106,7 @@ def worker_process(queue: queue.Queue, num_threads: int):
class AsyncImageWriter:
"""
This class abstract away the initialisation of processes or/and threads to
- save images on disk asynchrounously, which is critical to control a robot and record data
+ save images on disk asynchronously, which is critical to control a robot and record data
at a high frame rate.
When `num_processes=0`, it creates a threads pool of size `num_threads`.
diff --git a/src/lerobot/datasets/lerobot_dataset.py b/src/lerobot/datasets/lerobot_dataset.py
new file mode 100644
index 0000000000..72d1a722c5
--- /dev/null
+++ b/src/lerobot/datasets/lerobot_dataset.py
@@ -0,0 +1,1234 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import contextlib
+import logging
+import shutil
+from collections.abc import Callable
+from pathlib import Path
+
+import datasets
+import numpy as np
+import packaging.version
+import PIL.Image
+import torch
+import torch.utils
+from datasets import concatenate_datasets, load_dataset
+from huggingface_hub import HfApi, snapshot_download
+from huggingface_hub.constants import REPOCARD_NAME
+from huggingface_hub.errors import RevisionNotFoundError
+
+from lerobot.constants import HF_LEROBOT_HOME
+from lerobot.datasets.compute_stats import aggregate_stats, compute_episode_stats
+from lerobot.datasets.image_writer import AsyncImageWriter, write_image
+from lerobot.datasets.utils import (
+ DEFAULT_FEATURES,
+ DEFAULT_IMAGE_PATH,
+ INFO_PATH,
+ TASKS_PATH,
+ _validate_feature_names,
+ append_jsonlines,
+ backward_compatible_episodes_stats,
+ check_delta_timestamps,
+ check_timestamps_sync,
+ check_version_compatibility,
+ create_empty_dataset_info,
+ create_lerobot_dataset_card,
+ embed_images,
+ get_delta_indices,
+ get_episode_data_index,
+ get_hf_features_from_features,
+ get_safe_version,
+ hf_transform_to_torch,
+ is_valid_version,
+ load_episodes,
+ load_episodes_stats,
+ load_info,
+ load_stats,
+ load_tasks,
+ validate_episode_buffer,
+ validate_frame,
+ write_episode,
+ write_episode_stats,
+ write_info,
+ write_json,
+)
+from lerobot.datasets.video_utils import (
+ VideoFrame,
+ decode_video_frames,
+ encode_video_frames,
+ get_safe_default_codec,
+ get_video_info,
+)
+
+CODEBASE_VERSION = "v2.1"
+
+
+class LeRobotDatasetMetadata:
+ def __init__(
+ self,
+ repo_id: str,
+ root: str | Path | None = None,
+ revision: str | None = None,
+ force_cache_sync: bool = False,
+ ):
+ self.repo_id = repo_id
+ self.revision = revision if revision else CODEBASE_VERSION
+ self.root = Path(root) if root is not None else HF_LEROBOT_HOME / repo_id
+
+ try:
+ if force_cache_sync:
+ raise FileNotFoundError
+ self.load_metadata()
+ except (FileNotFoundError, NotADirectoryError):
+ if is_valid_version(self.revision):
+ self.revision = get_safe_version(self.repo_id, self.revision)
+
+ (self.root / "meta").mkdir(exist_ok=True, parents=True)
+ self.pull_from_repo(allow_patterns="meta/")
+ self.load_metadata()
+
+ def load_metadata(self):
+ self.info = load_info(self.root)
+ check_version_compatibility(self.repo_id, self._version, CODEBASE_VERSION)
+ self.tasks, self.task_to_task_index = load_tasks(self.root)
+ self.episodes = load_episodes(self.root)
+ if self._version < packaging.version.parse("v2.1"):
+ self.stats = load_stats(self.root)
+ self.episodes_stats = backward_compatible_episodes_stats(self.stats, self.episodes)
+ else:
+ self.episodes_stats = load_episodes_stats(self.root)
+ self.stats = aggregate_stats(list(self.episodes_stats.values()))
+
+ def pull_from_repo(
+ self,
+ allow_patterns: list[str] | str | None = None,
+ ignore_patterns: list[str] | str | None = None,
+ ) -> None:
+ snapshot_download(
+ self.repo_id,
+ repo_type="dataset",
+ revision=self.revision,
+ local_dir=self.root,
+ allow_patterns=allow_patterns,
+ ignore_patterns=ignore_patterns,
+ )
+
+ @property
+ def _version(self) -> packaging.version.Version:
+ """Codebase version used to create this dataset."""
+ return packaging.version.parse(self.info["codebase_version"])
+
+ def get_data_file_path(self, ep_index: int) -> Path:
+ ep_chunk = self.get_episode_chunk(ep_index)
+ fpath = self.data_path.format(episode_chunk=ep_chunk, episode_index=ep_index)
+ return Path(fpath)
+
+ def get_video_file_path(self, ep_index: int, vid_key: str) -> Path:
+ ep_chunk = self.get_episode_chunk(ep_index)
+ fpath = self.video_path.format(episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_index)
+ return Path(fpath)
+
+ def get_episode_chunk(self, ep_index: int) -> int:
+ return ep_index // self.chunks_size
+
+ @property
+ def data_path(self) -> str:
+ """Formattable string for the parquet files."""
+ return self.info["data_path"]
+
+ @property
+ def video_path(self) -> str | None:
+ """Formattable string for the video files."""
+ return self.info["video_path"]
+
+ @property
+ def robot_type(self) -> str | None:
+ """Robot type used in recording this dataset."""
+ return self.info["robot_type"]
+
+ @property
+ def fps(self) -> int:
+ """Frames per second used during data collection."""
+ return self.info["fps"]
+
+ @property
+ def features(self) -> dict[str, dict]:
+ """All features contained in the dataset."""
+ return self.info["features"]
+
+ @property
+ def image_keys(self) -> list[str]:
+ """Keys to access visual modalities stored as images."""
+ return [key for key, ft in self.features.items() if ft["dtype"] == "image"]
+
+ @property
+ def video_keys(self) -> list[str]:
+ """Keys to access visual modalities stored as videos."""
+ return [key for key, ft in self.features.items() if ft["dtype"] == "video"]
+
+ @property
+ def camera_keys(self) -> list[str]:
+ """Keys to access visual modalities (regardless of their storage method)."""
+ return [key for key, ft in self.features.items() if ft["dtype"] in ["video", "image"]]
+
+ @property
+ def names(self) -> dict[str, list | dict]:
+ """Names of the various dimensions of vector modalities."""
+ return {key: ft["names"] for key, ft in self.features.items()}
+
+ @property
+ def shapes(self) -> dict:
+ """Shapes for the different features."""
+ return {key: tuple(ft["shape"]) for key, ft in self.features.items()}
+
+ @property
+ def total_episodes(self) -> int:
+ """Total number of episodes available."""
+ return self.info["total_episodes"]
+
+ @property
+ def total_frames(self) -> int:
+ """Total number of frames saved in this dataset."""
+ return self.info["total_frames"]
+
+ @property
+ def total_tasks(self) -> int:
+ """Total number of different tasks performed in this dataset."""
+ return self.info["total_tasks"]
+
+ @property
+ def total_chunks(self) -> int:
+ """Total number of chunks (groups of episodes)."""
+ return self.info["total_chunks"]
+
+ @property
+ def chunks_size(self) -> int:
+ """Max number of episodes per chunk."""
+ return self.info["chunks_size"]
+
+ def get_task_index(self, task: str) -> int | None:
+ """
+ Given a task in natural language, returns its task_index if the task already exists in the dataset,
+        otherwise returns None.
+ """
+ return self.task_to_task_index.get(task, None)
+
+ def add_task(self, task: str):
+ """
+ Given a task in natural language, add it to the dictionary of tasks.
+ """
+ if task in self.task_to_task_index:
+ raise ValueError(f"The task '{task}' already exists and can't be added twice.")
+
+ task_index = self.info["total_tasks"]
+ self.task_to_task_index[task] = task_index
+ self.tasks[task_index] = task
+ self.info["total_tasks"] += 1
+
+ task_dict = {
+ "task_index": task_index,
+ "task": task,
+ }
+ append_jsonlines(task_dict, self.root / TASKS_PATH)
+
+ def save_episode(
+ self,
+ episode_index: int,
+ episode_length: int,
+ episode_tasks: list[str],
+ episode_stats: dict[str, dict],
+ ) -> None:
+ self.info["total_episodes"] += 1
+ self.info["total_frames"] += episode_length
+
+ chunk = self.get_episode_chunk(episode_index)
+ if chunk >= self.total_chunks:
+ self.info["total_chunks"] += 1
+
+ self.info["splits"] = {"train": f"0:{self.info['total_episodes']}"}
+ self.info["total_videos"] += len(self.video_keys)
+
+ write_info(self.info, self.root)
+
+ episode_dict = {
+ "episode_index": episode_index,
+ "tasks": episode_tasks,
+ "length": episode_length,
+ }
+ self.episodes[episode_index] = episode_dict
+ write_episode(episode_dict, self.root)
+
+ self.episodes_stats[episode_index] = episode_stats
+ self.stats = aggregate_stats([self.stats, episode_stats]) if self.stats else episode_stats
+ write_episode_stats(episode_index, episode_stats, self.root)
+
+ def update_video_info(self) -> None:
+ """
+        Warning: this function writes info from the first episode's videos, implicitly assuming that all
+        videos have been encoded the same way. It also assumes that the first episode exists.
+ """
+ for key in self.video_keys:
+ if not self.features[key].get("info", None):
+ video_path = self.root / self.get_video_file_path(ep_index=0, vid_key=key)
+ self.info["features"][key]["info"] = get_video_info(video_path)
+
+ def __repr__(self):
+ feature_keys = list(self.features)
+ return (
+ f"{self.__class__.__name__}({{\n"
+ f" Repository ID: '{self.repo_id}',\n"
+ f" Total episodes: '{self.total_episodes}',\n"
+ f" Total frames: '{self.total_frames}',\n"
+ f" Features: '{feature_keys}',\n"
+ "})',\n"
+ )
+
+ @classmethod
+ def create(
+ cls,
+ repo_id: str,
+ fps: int,
+ features: dict,
+ robot_type: str | None = None,
+ root: str | Path | None = None,
+ use_videos: bool = True,
+ ) -> "LeRobotDatasetMetadata":
+ """Creates metadata for a LeRobotDataset."""
+ obj = cls.__new__(cls)
+ obj.repo_id = repo_id
+ obj.root = Path(root) if root is not None else HF_LEROBOT_HOME / repo_id
+
+ obj.root.mkdir(parents=True, exist_ok=False)
+
+ # TODO(aliberts, rcadene): implement sanity check for features
+ features = {**features, **DEFAULT_FEATURES}
+ _validate_feature_names(features)
+
+ obj.tasks, obj.task_to_task_index = {}, {}
+ obj.episodes_stats, obj.stats, obj.episodes = {}, {}, {}
+ obj.info = create_empty_dataset_info(CODEBASE_VERSION, fps, features, use_videos, robot_type)
+ if len(obj.video_keys) > 0 and not use_videos:
+            raise ValueError("Video features were provided in 'features' but 'use_videos' is set to False.")
+ write_json(obj.info, obj.root / INFO_PATH)
+ obj.revision = None
+ return obj
+
+
+class LeRobotDataset(torch.utils.data.Dataset):
+ def __init__(
+ self,
+ repo_id: str,
+ root: str | Path | None = None,
+ episodes: list[int] | None = None,
+ image_transforms: Callable | None = None,
+ delta_timestamps: dict[list[float]] | None = None,
+ tolerance_s: float = 1e-4,
+ revision: str | None = None,
+ force_cache_sync: bool = False,
+ download_videos: bool = True,
+ video_backend: str | None = None,
+ batch_encoding_size: int = 1,
+ ):
+ """
+ 2 modes are available for instantiating this class, depending on 2 different use cases:
+
+ 1. Your dataset already exists:
+ - On your local disk in the 'root' folder. This is typically the case when you recorded your
+ dataset locally and you may or may not have pushed it to the hub yet. Instantiating this class
+ with 'root' will load your dataset directly from disk. This can happen while you're offline (no
+ internet connection).
+
+ - On the Hugging Face Hub at the address https://huggingface.co/datasets/{repo_id} and not on
+ your local disk in the 'root' folder. Instantiating this class with this 'repo_id' will download
+            the dataset from that address and load it, provided your dataset is compliant with
+ codebase_version v2.0. If your dataset has been created before this new format, you will be
+ prompted to convert it using our conversion script from v1.6 to v2.0, which you can find at
+ lerobot/datasets/v2/convert_dataset_v1_to_v2.py.
+
+
+        2. Your dataset doesn't already exist (either on local disk or on the Hub): you can create an empty
+            LeRobotDataset with the 'create' classmethod. This can be used for recording a dataset or for
+            porting an existing dataset to the LeRobotDataset format.
+
+
+ In terms of files, LeRobotDataset encapsulates 3 main things:
+ - metadata:
+ - info contains various information about the dataset like shapes, keys, fps etc.
+ - stats stores the dataset statistics of the different modalities for normalization
+ - tasks contains the prompts for each task of the dataset, which can be used for
+ task-conditioned training.
+ - hf_dataset (from datasets.Dataset), which will read any values from parquet files.
+ - videos (optional) from which frames are loaded to be synchronous with data from parquet files.
+
+ A typical LeRobotDataset looks like this from its root path:
+ .
+ ├── data
+ │ ├── chunk-000
+ │ │ ├── episode_000000.parquet
+ │ │ ├── episode_000001.parquet
+ │ │ ├── episode_000002.parquet
+ │ │ └── ...
+ │ ├── chunk-001
+ │ │ ├── episode_001000.parquet
+ │ │ ├── episode_001001.parquet
+ │ │ ├── episode_001002.parquet
+ │ │ └── ...
+ │ └── ...
+ ├── meta
+ │ ├── episodes.jsonl
+ │ ├── info.json
+ │ ├── stats.json
+ │ └── tasks.jsonl
+ └── videos
+ ├── chunk-000
+ │ ├── observation.images.laptop
+ │ │ ├── episode_000000.mp4
+ │ │ ├── episode_000001.mp4
+ │ │ ├── episode_000002.mp4
+ │ │ └── ...
+ │ ├── observation.images.phone
+ │ │ ├── episode_000000.mp4
+ │ │ ├── episode_000001.mp4
+ │ │ ├── episode_000002.mp4
+ │ │ └── ...
+ ├── chunk-001
+ └── ...
+
+        Note that this file-based structure is designed to be as versatile as possible. The files are split by
+        episode, which allows more granular control over which episodes one wants to use and download. The
+        structure of the dataset is entirely described in the info.json file, which can easily be downloaded
+        or viewed directly on the hub before downloading any actual data. The file types used are deliberately
+        simple and do not need complex tools to be read: only .parquet, .json and .mp4 files are used (plus
+        .md for the README).
+
+ Args:
+ repo_id (str): This is the repo id that will be used to fetch the dataset. Locally, the dataset
+ will be stored under root/repo_id.
+ root (Path | None, optional): Local directory to use for downloading/writing files. You can also
+ set the LEROBOT_HOME environment variable to point to a different location. Defaults to
+ '~/.cache/huggingface/lerobot'.
+ episodes (list[int] | None, optional): If specified, this will only load episodes specified by
+ their episode_index in this list. Defaults to None.
+ image_transforms (Callable | None, optional): You can pass standard v2 image transforms from
+ torchvision.transforms.v2 here which will be applied to visual modalities (whether they come
+ from videos or images). Defaults to None.
+            delta_timestamps (dict[list[float]] | None, optional): Mapping from a feature key to a list of
+                relative timestamps (in seconds, multiples of 1/fps) used to query additional frames around
+                the current one, e.g. {"action": [-1/fps, 0, 1/fps]}. When provided, __getitem__ returns, for
+                each of these keys, the values stacked over the requested timestamps, together with a
+                '{key}_is_pad' boolean mask flagging timestamps that fall outside the episode. Defaults to
+                None.
+ tolerance_s (float, optional): Tolerance in seconds used to ensure data timestamps are actually in
+                sync with the fps value. It is used at the init of the dataset to make sure that each
+                timestamp is separated from the next by 1/fps +/- tolerance_s. This also applies to frames
+ decoded from video files. It is also used to check that `delta_timestamps` (when provided) are
+ multiples of 1/fps. Defaults to 1e-4.
+ revision (str, optional): An optional Git revision id which can be a branch name, a tag, or a
+ commit hash. Defaults to current codebase version tag.
+            force_cache_sync (bool, optional): Flag to sync and refresh local files first. If False and files
+                are already present in the local cache, loading will be faster but the files might not be in
+                sync with the version on the hub, especially if you specified 'revision'. Defaults to
+                False.
+ download_videos (bool, optional): Flag to download the videos. Note that when set to True but the
+ video files are already present on local disk, they won't be downloaded again. Defaults to
+ True.
+            video_backend (str | None, optional): Video backend to use for decoding videos. Defaults to
+                'torchcodec' when available on the platform; otherwise, defaults to 'pyav'. You can also use
+                the 'pyav' decoder used by Torchvision, which used to be the default option, or
+                'video_reader', which is another Torchvision decoder.
+ batch_encoding_size (int, optional): Number of episodes to accumulate before batch encoding videos.
+ Set to 1 for immediate encoding (default), or higher for batched encoding. Defaults to 1.
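+
+        Example (a minimal sketch; it assumes the 'lerobot/pusht' dataset is available on the hub and that
+        the required dependencies are installed):
+            from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+            dataset = LeRobotDataset("lerobot/pusht", episodes=[0, 1])
+            item = dataset[0]  # dict with one tensor per feature, plus the 'task' string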
+ """
+ super().__init__()
+ self.repo_id = repo_id
+ self.root = Path(root) if root else HF_LEROBOT_HOME / repo_id
+ self.image_transforms = image_transforms
+ self.delta_timestamps = delta_timestamps
+ self.episodes = episodes
+ self.tolerance_s = tolerance_s
+ self.revision = revision if revision else CODEBASE_VERSION
+ self.video_backend = video_backend if video_backend else get_safe_default_codec()
+ self.delta_indices = None
+ self.batch_encoding_size = batch_encoding_size
+ self.episodes_since_last_encoding = 0
+
+ # Unused attributes
+ self.image_writer = None
+ self.episode_buffer = None
+
+ self.root.mkdir(exist_ok=True, parents=True)
+
+ # Load metadata
+ self.meta = LeRobotDatasetMetadata(
+ self.repo_id, self.root, self.revision, force_cache_sync=force_cache_sync
+ )
+ if self.episodes is not None and self.meta._version >= packaging.version.parse("v2.1"):
+ episodes_stats = [self.meta.episodes_stats[ep_idx] for ep_idx in self.episodes]
+ self.stats = aggregate_stats(episodes_stats)
+
+ # Load actual data
+ try:
+ if force_cache_sync:
+ raise FileNotFoundError
+ assert all((self.root / fpath).is_file() for fpath in self.get_episodes_file_paths())
+ self.hf_dataset = self.load_hf_dataset()
+ except (AssertionError, FileNotFoundError, NotADirectoryError):
+ self.revision = get_safe_version(self.repo_id, self.revision)
+ self.download_episodes(download_videos)
+ self.hf_dataset = self.load_hf_dataset()
+
+ self.episode_data_index = get_episode_data_index(self.meta.episodes, self.episodes)
+
+ # Check timestamps
+ timestamps = torch.stack(self.hf_dataset["timestamp"]).numpy()
+ episode_indices = torch.stack(self.hf_dataset["episode_index"]).numpy()
+ ep_data_index_np = {k: t.numpy() for k, t in self.episode_data_index.items()}
+ check_timestamps_sync(timestamps, episode_indices, ep_data_index_np, self.fps, self.tolerance_s)
+
+ # Setup delta_indices
+ if self.delta_timestamps is not None:
+ check_delta_timestamps(self.delta_timestamps, self.fps, self.tolerance_s)
+ self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps)
+
+ def push_to_hub(
+ self,
+ branch: str | None = None,
+ tags: list | None = None,
+ license: str | None = "apache-2.0",
+ tag_version: bool = True,
+ push_videos: bool = True,
+ private: bool = False,
+ allow_patterns: list[str] | str | None = None,
+ upload_large_folder: bool = False,
+ **card_kwargs,
+ ) -> None:
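+        """Upload the local dataset folder (metadata, parquet data and, optionally, videos) to the Hub.
+
+        A minimal usage sketch (assumes you are authenticated with the Hugging Face Hub, e.g. via
+        `huggingface-cli login`, and that `self.repo_id` is a valid 'user/name' identifier):
+            dataset.push_to_hub(tags=["LeRobot"], private=False)
+        """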
+ ignore_patterns = ["images/"]
+ if not push_videos:
+ ignore_patterns.append("videos/")
+
+ hub_api = HfApi()
+ hub_api.create_repo(
+ repo_id=self.repo_id,
+ private=private,
+ repo_type="dataset",
+ exist_ok=True,
+ )
+ if branch:
+ hub_api.create_branch(
+ repo_id=self.repo_id,
+ branch=branch,
+ revision=self.revision,
+ repo_type="dataset",
+ exist_ok=True,
+ )
+
+ upload_kwargs = {
+ "repo_id": self.repo_id,
+ "folder_path": self.root,
+ "repo_type": "dataset",
+ "revision": branch,
+ "allow_patterns": allow_patterns,
+ "ignore_patterns": ignore_patterns,
+ }
+ if upload_large_folder:
+ hub_api.upload_large_folder(**upload_kwargs)
+ else:
+ hub_api.upload_folder(**upload_kwargs)
+
+ if not hub_api.file_exists(self.repo_id, REPOCARD_NAME, repo_type="dataset", revision=branch):
+ card = create_lerobot_dataset_card(
+ tags=tags, dataset_info=self.meta.info, license=license, **card_kwargs
+ )
+ card.push_to_hub(repo_id=self.repo_id, repo_type="dataset", revision=branch)
+
+ if tag_version:
+ with contextlib.suppress(RevisionNotFoundError):
+ hub_api.delete_tag(self.repo_id, tag=CODEBASE_VERSION, repo_type="dataset")
+ hub_api.create_tag(self.repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")
+
+ def pull_from_repo(
+ self,
+ allow_patterns: list[str] | str | None = None,
+ ignore_patterns: list[str] | str | None = None,
+ ) -> None:
+ snapshot_download(
+ self.repo_id,
+ repo_type="dataset",
+ revision=self.revision,
+ local_dir=self.root,
+ allow_patterns=allow_patterns,
+ ignore_patterns=ignore_patterns,
+ )
+
+ def download_episodes(self, download_videos: bool = True) -> None:
+ """Downloads the dataset from the given 'repo_id' at the provided version. If 'episodes' is given, this
+ will only download those episodes (selected by their episode_index). If 'episodes' is None, the whole
+ dataset will be downloaded. Thanks to the behavior of snapshot_download, if the files are already present
+ in 'local_dir', they won't be downloaded again.
+ """
+ # TODO(rcadene, aliberts): implement faster transfer
+ # https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads
+ files = None
+ ignore_patterns = None if download_videos else "videos/"
+ if self.episodes is not None:
+ files = self.get_episodes_file_paths()
+
+ self.pull_from_repo(allow_patterns=files, ignore_patterns=ignore_patterns)
+
+ def get_episodes_file_paths(self) -> list[Path]:
+ episodes = self.episodes if self.episodes is not None else list(range(self.meta.total_episodes))
+ fpaths = [str(self.meta.get_data_file_path(ep_idx)) for ep_idx in episodes]
+ if len(self.meta.video_keys) > 0:
+ video_files = [
+ str(self.meta.get_video_file_path(ep_idx, vid_key))
+ for vid_key in self.meta.video_keys
+ for ep_idx in episodes
+ ]
+ fpaths += video_files
+
+ return fpaths
+
+ def load_hf_dataset(self) -> datasets.Dataset:
+ """hf_dataset contains all the observations, states, actions, rewards, etc."""
+ if self.episodes is None:
+ path = str(self.root / "data")
+ hf_dataset = load_dataset("parquet", data_dir=path, split="train")
+ else:
+ files = [str(self.root / self.meta.get_data_file_path(ep_idx)) for ep_idx in self.episodes]
+ hf_dataset = load_dataset("parquet", data_files=files, split="train")
+
+ # TODO(aliberts): hf_dataset.set_format("torch")
+ hf_dataset.set_transform(hf_transform_to_torch)
+ return hf_dataset
+
+ def create_hf_dataset(self) -> datasets.Dataset:
+ features = get_hf_features_from_features(self.features)
+ ft_dict = {col: [] for col in features}
+ hf_dataset = datasets.Dataset.from_dict(ft_dict, features=features, split="train")
+
+ # TODO(aliberts): hf_dataset.set_format("torch")
+ hf_dataset.set_transform(hf_transform_to_torch)
+ return hf_dataset
+
+ @property
+ def fps(self) -> int:
+ """Frames per second used during data collection."""
+ return self.meta.fps
+
+ @property
+ def num_frames(self) -> int:
+ """Number of frames in selected episodes."""
+ return len(self.hf_dataset) if self.hf_dataset is not None else self.meta.total_frames
+
+ @property
+ def num_episodes(self) -> int:
+ """Number of episodes selected."""
+ return len(self.episodes) if self.episodes is not None else self.meta.total_episodes
+
+ @property
+ def features(self) -> dict[str, dict]:
+ return self.meta.features
+
+ @property
+ def hf_features(self) -> datasets.Features:
+ """Features of the hf_dataset."""
+ if self.hf_dataset is not None:
+ return self.hf_dataset.features
+ else:
+ return get_hf_features_from_features(self.features)
+
+ def _get_query_indices(self, idx: int, ep_idx: int) -> tuple[dict[str, list[int | bool]]]:
+ ep_start = self.episode_data_index["from"][ep_idx]
+ ep_end = self.episode_data_index["to"][ep_idx]
+ query_indices = {
+ key: [max(ep_start.item(), min(ep_end.item() - 1, idx + delta)) for delta in delta_idx]
+ for key, delta_idx in self.delta_indices.items()
+ }
+ padding = { # Pad values outside of current episode range
+ f"{key}_is_pad": torch.BoolTensor(
+ [(idx + delta < ep_start.item()) | (idx + delta >= ep_end.item()) for delta in delta_idx]
+ )
+ for key, delta_idx in self.delta_indices.items()
+ }
+ return query_indices, padding
+
+ def _get_query_timestamps(
+ self,
+ current_ts: float,
+ query_indices: dict[str, list[int]] | None = None,
+ ) -> dict[str, list[float]]:
+ query_timestamps = {}
+ for key in self.meta.video_keys:
+ if query_indices is not None and key in query_indices:
+ timestamps = self.hf_dataset.select(query_indices[key])["timestamp"]
+ query_timestamps[key] = torch.stack(timestamps).tolist()
+ else:
+ query_timestamps[key] = [current_ts]
+
+ return query_timestamps
+
+ def _query_hf_dataset(self, query_indices: dict[str, list[int]]) -> dict:
+ return {
+ key: torch.stack(self.hf_dataset.select(q_idx)[key])
+ for key, q_idx in query_indices.items()
+ if key not in self.meta.video_keys
+ }
+
+ def _query_videos(self, query_timestamps: dict[str, list[float]], ep_idx: int) -> dict[str, torch.Tensor]:
+ """Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function
+        in the main process (e.g. by using a second DataLoader with num_workers=0). It will result in a
+ Segmentation Fault. This probably happens because a memory reference to the video loader is created in
+ the main process and a subprocess fails to access it.
+ """
+ item = {}
+ for vid_key, query_ts in query_timestamps.items():
+ video_path = self.root / self.meta.get_video_file_path(ep_idx, vid_key)
+ frames = decode_video_frames(video_path, query_ts, self.tolerance_s, self.video_backend)
+ item[vid_key] = frames.squeeze(0)
+
+ return item
+
+ def _add_padding_keys(self, item: dict, padding: dict[str, list[bool]]) -> dict:
+ for key, val in padding.items():
+ item[key] = torch.BoolTensor(val)
+ return item
+
+ def __len__(self):
+ return self.num_frames
+
+ def __getitem__(self, idx) -> dict:
+ item = self.hf_dataset[idx]
+ ep_idx = item["episode_index"].item()
+
+ query_indices = None
+ if self.delta_indices is not None:
+ query_indices, padding = self._get_query_indices(idx, ep_idx)
+ query_result = self._query_hf_dataset(query_indices)
+ item = {**item, **padding}
+ for key, val in query_result.items():
+ item[key] = val
+
+ if len(self.meta.video_keys) > 0:
+ current_ts = item["timestamp"].item()
+ query_timestamps = self._get_query_timestamps(current_ts, query_indices)
+ video_frames = self._query_videos(query_timestamps, ep_idx)
+ item = {**video_frames, **item}
+
+ if self.image_transforms is not None:
+ image_keys = self.meta.camera_keys
+ for cam in image_keys:
+ item[cam] = self.image_transforms(item[cam])
+
+ # Add task as a string
+ task_idx = item["task_index"].item()
+ item["task"] = self.meta.tasks[task_idx]
+
+ return item
+
+ def __repr__(self):
+ feature_keys = list(self.features)
+        return (
+            f"{self.__class__.__name__}({{\n"
+            f"    Repository ID: '{self.repo_id}',\n"
+            f"    Number of selected episodes: '{self.num_episodes}',\n"
+            f"    Number of selected samples: '{self.num_frames}',\n"
+            f"    Features: '{feature_keys}',\n"
+            "})"
+        )
+
+ def create_episode_buffer(self, episode_index: int | None = None) -> dict:
+ current_ep_idx = self.meta.total_episodes if episode_index is None else episode_index
+ ep_buffer = {}
+ # size and task are special cases that are not in self.features
+ ep_buffer["size"] = 0
+ ep_buffer["task"] = []
+ for key in self.features:
+ ep_buffer[key] = current_ep_idx if key == "episode_index" else []
+ return ep_buffer
+
+ def _get_image_file_path(self, episode_index: int, image_key: str, frame_index: int) -> Path:
+ fpath = DEFAULT_IMAGE_PATH.format(
+ image_key=image_key, episode_index=episode_index, frame_index=frame_index
+ )
+ return self.root / fpath
+
+ def _save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path) -> None:
+ if self.image_writer is None:
+ if isinstance(image, torch.Tensor):
+ image = image.cpu().numpy()
+ write_image(image, fpath)
+ else:
+ self.image_writer.save_image(image=image, fpath=fpath)
+
+ def add_frame(self, frame: dict, task: str, timestamp: float | None = None) -> None:
+ """
+        This function only adds the frame to the episode_buffer. Apart from images, which are written to a
+        temporary directory, nothing is written to disk. To save those frames, the 'save_episode()' method
+        then needs to be called.
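+
+        A minimal sketch of the expected call pattern (it assumes 'dataset' was created with
+        LeRobotDataset.create and that the frame dict matches the declared features):
+            dataset.add_frame({"observation.state": state, "action": action}, task="pick up the cube")
+            # ... one call per frame of the episode, then:
+            dataset.save_episode()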
+ """
+ # Convert torch to numpy if needed
+ for name in frame:
+ if isinstance(frame[name], torch.Tensor):
+ frame[name] = frame[name].numpy()
+
+ validate_frame(frame, self.features)
+
+ if self.episode_buffer is None:
+ self.episode_buffer = self.create_episode_buffer()
+
+ # Automatically add frame_index and timestamp to episode buffer
+ frame_index = self.episode_buffer["size"]
+ if timestamp is None:
+ timestamp = frame_index / self.fps
+ self.episode_buffer["frame_index"].append(frame_index)
+ self.episode_buffer["timestamp"].append(timestamp)
+ self.episode_buffer["task"].append(task)
+
+ # Add frame features to episode_buffer
+ for key in frame:
+ if key not in self.features:
+ raise ValueError(
+ f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'."
+ )
+
+ if self.features[key]["dtype"] in ["image", "video"]:
+ img_path = self._get_image_file_path(
+ episode_index=self.episode_buffer["episode_index"], image_key=key, frame_index=frame_index
+ )
+ if frame_index == 0:
+ img_path.parent.mkdir(parents=True, exist_ok=True)
+ self._save_image(frame[key], img_path)
+ self.episode_buffer[key].append(str(img_path))
+ else:
+ self.episode_buffer[key].append(frame[key])
+
+ self.episode_buffer["size"] += 1
+
+ def save_episode(self, episode_data: dict | None = None) -> None:
+ """
+ This will save to disk the current episode in self.episode_buffer.
+
+ Video encoding is handled automatically based on batch_encoding_size:
+ - If batch_encoding_size == 1: Videos are encoded immediately after each episode
+        - If batch_encoding_size > 1: Videos are encoded in batches, once every batch_encoding_size episodes.
+
+ Args:
+ episode_data (dict | None, optional): Dict containing the episode data to save. If None, this will
+ save the current episode in self.episode_buffer, which is filled with 'add_frame'. Defaults to
+ None.
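+
+        A hedged sketch of the batched-encoding behaviour (the batch size is illustrative):
+            dataset = LeRobotDataset.create(..., batch_encoding_size=5)
+            # with this setting, videos are only encoded once every 5 saved episodes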
+ """
+ if not episode_data:
+ episode_buffer = self.episode_buffer
+
+ validate_episode_buffer(episode_buffer, self.meta.total_episodes, self.features)
+
+ # size and task are special cases that won't be added to hf_dataset
+ episode_length = episode_buffer.pop("size")
+ tasks = episode_buffer.pop("task")
+ episode_tasks = list(set(tasks))
+ episode_index = episode_buffer["episode_index"]
+
+ episode_buffer["index"] = np.arange(self.meta.total_frames, self.meta.total_frames + episode_length)
+ episode_buffer["episode_index"] = np.full((episode_length,), episode_index)
+
+ # Add new tasks to the tasks dictionary
+ for task in episode_tasks:
+ task_index = self.meta.get_task_index(task)
+ if task_index is None:
+ self.meta.add_task(task)
+
+ # Given tasks in natural language, find their corresponding task indices
+ episode_buffer["task_index"] = np.array([self.meta.get_task_index(task) for task in tasks])
+
+ for key, ft in self.features.items():
+ # index, episode_index, task_index are already processed above, and image and video
+ # are processed separately by storing image path and frame info as meta data
+ if key in ["index", "episode_index", "task_index"] or ft["dtype"] in ["image", "video"]:
+ continue
+ episode_buffer[key] = np.stack(episode_buffer[key])
+
+ self._wait_image_writer()
+ self._save_episode_table(episode_buffer, episode_index)
+ ep_stats = compute_episode_stats(episode_buffer, self.features)
+
+ has_video_keys = len(self.meta.video_keys) > 0
+ use_batched_encoding = self.batch_encoding_size > 1
+
+ if has_video_keys and not use_batched_encoding:
+ self.encode_episode_videos(episode_index)
+
+ # `meta.save_episode` should be executed after encoding the videos
+ self.meta.save_episode(episode_index, episode_length, episode_tasks, ep_stats)
+
+ # Check if we should trigger batch encoding
+ if has_video_keys and use_batched_encoding:
+ self.episodes_since_last_encoding += 1
+ if self.episodes_since_last_encoding == self.batch_encoding_size:
+ start_ep = self.num_episodes - self.batch_encoding_size
+ end_ep = self.num_episodes
+ logging.info(
+ f"Batch encoding {self.batch_encoding_size} videos for episodes {start_ep} to {end_ep - 1}"
+ )
+ self.batch_encode_videos(start_ep, end_ep)
+ self.episodes_since_last_encoding = 0
+
+ # Episode data index and timestamp checking
+ ep_data_index = get_episode_data_index(self.meta.episodes, [episode_index])
+ ep_data_index_np = {k: t.numpy() for k, t in ep_data_index.items()}
+ check_timestamps_sync(
+ episode_buffer["timestamp"],
+ episode_buffer["episode_index"],
+ ep_data_index_np,
+ self.fps,
+ self.tolerance_s,
+ )
+
+ # Verify that we have one parquet file per episode and the number of video files matches the number of encoded episodes
+ parquet_files = list(self.root.rglob("*.parquet"))
+ assert len(parquet_files) == self.num_episodes
+ video_files = list(self.root.rglob("*.mp4"))
+ assert len(video_files) == (self.num_episodes - self.episodes_since_last_encoding) * len(
+ self.meta.video_keys
+ )
+
+ if not episode_data: # Reset the buffer
+ self.episode_buffer = self.create_episode_buffer()
+
+ def _save_episode_table(self, episode_buffer: dict, episode_index: int) -> None:
+ episode_dict = {key: episode_buffer[key] for key in self.hf_features}
+ ep_dataset = datasets.Dataset.from_dict(episode_dict, features=self.hf_features, split="train")
+ ep_dataset = embed_images(ep_dataset)
+ self.hf_dataset = concatenate_datasets([self.hf_dataset, ep_dataset])
+ self.hf_dataset.set_transform(hf_transform_to_torch)
+ ep_data_path = self.root / self.meta.get_data_file_path(ep_index=episode_index)
+ ep_data_path.parent.mkdir(parents=True, exist_ok=True)
+ ep_dataset.to_parquet(ep_data_path)
+
+ def clear_episode_buffer(self) -> None:
+ episode_index = self.episode_buffer["episode_index"]
+
+ # Clean up image files for the current episode buffer
+ if self.image_writer is not None:
+ for cam_key in self.meta.camera_keys:
+ img_dir = self._get_image_file_path(
+ episode_index=episode_index, image_key=cam_key, frame_index=0
+ ).parent
+ if img_dir.is_dir():
+ shutil.rmtree(img_dir)
+
+ # Reset the buffer
+ self.episode_buffer = self.create_episode_buffer()
+
+ def start_image_writer(self, num_processes: int = 0, num_threads: int = 4) -> None:
+ if isinstance(self.image_writer, AsyncImageWriter):
+ logging.warning(
+ "You are starting a new AsyncImageWriter that is replacing an already existing one in the dataset."
+ )
+
+ self.image_writer = AsyncImageWriter(
+ num_processes=num_processes,
+ num_threads=num_threads,
+ )
+
+ def stop_image_writer(self) -> None:
+ """
+ Whenever wrapping this dataset inside a parallelized DataLoader, this needs to be called first to
+ remove the image_writer in order for the LeRobotDataset object to be picklable and parallelized.
+ """
+ if self.image_writer is not None:
+ self.image_writer.stop()
+ self.image_writer = None
+
+ def _wait_image_writer(self) -> None:
+ """Wait for asynchronous image writer to finish."""
+ if self.image_writer is not None:
+ self.image_writer.wait_until_done()
+
+ def encode_episode_videos(self, episode_index: int) -> None:
+ """
+ Use ffmpeg to convert frames stored as png into mp4 videos.
+        Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speed up encoding,
+ since video encoding with ffmpeg is already using multithreading.
+
+ This method handles video encoding steps:
+ - Video encoding via ffmpeg
+ - Video info updating in metadata
+ - Raw image cleanup
+
+ Args:
+ episode_index (int): Index of the episode to encode.
+ """
+ for key in self.meta.video_keys:
+ video_path = self.root / self.meta.get_video_file_path(episode_index, key)
+ if video_path.is_file():
+ # Skip if video is already encoded. Could be the case when resuming data recording.
+ continue
+ img_dir = self._get_image_file_path(
+ episode_index=episode_index, image_key=key, frame_index=0
+ ).parent
+ encode_video_frames(img_dir, video_path, self.fps, overwrite=True)
+ shutil.rmtree(img_dir)
+
+ # Update video info (only needed when first episode is encoded since it reads from episode 0)
+ if len(self.meta.video_keys) > 0 and episode_index == 0:
+ self.meta.update_video_info()
+ write_info(self.meta.info, self.meta.root) # ensure video info always written properly
+
+ def batch_encode_videos(self, start_episode: int = 0, end_episode: int | None = None) -> None:
+ """
+ Batch encode videos for multiple episodes.
+
+ Args:
+ start_episode: Starting episode index (inclusive)
+ end_episode: Ending episode index (exclusive). If None, encodes all episodes from start_episode
+ """
+ if end_episode is None:
+ end_episode = self.meta.total_episodes
+
+ logging.info(f"Starting batch video encoding for episodes {start_episode} to {end_episode - 1}")
+
+ # Encode all episodes with cleanup enabled for individual episodes
+ for ep_idx in range(start_episode, end_episode):
+ logging.info(f"Encoding videos for episode {ep_idx}")
+ self.encode_episode_videos(ep_idx)
+
+ logging.info("Batch video encoding completed")
+
+ @classmethod
+ def create(
+ cls,
+ repo_id: str,
+ fps: int,
+ features: dict,
+ root: str | Path | None = None,
+ robot_type: str | None = None,
+ use_videos: bool = True,
+ tolerance_s: float = 1e-4,
+ image_writer_processes: int = 0,
+ image_writer_threads: int = 0,
+ video_backend: str | None = None,
+ batch_encoding_size: int = 1,
+ ) -> "LeRobotDataset":
+ """Create a LeRobot Dataset from scratch in order to record data."""
+ obj = cls.__new__(cls)
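+        # A hedged example of the expected 'features' structure (keys, shapes and names are illustrative):
+        #   features = {
+        #       "observation.state": {"dtype": "float32", "shape": (6,), "names": None},
+        #       "action": {"dtype": "float32", "shape": (6,), "names": None},
+        #       "observation.images.cam": {
+        #           "dtype": "video", "shape": (480, 640, 3), "names": ["height", "width", "channels"]
+        #       },
+        #   }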
+ obj.meta = LeRobotDatasetMetadata.create(
+ repo_id=repo_id,
+ fps=fps,
+ robot_type=robot_type,
+ features=features,
+ root=root,
+ use_videos=use_videos,
+ )
+ obj.repo_id = obj.meta.repo_id
+ obj.root = obj.meta.root
+ obj.revision = None
+ obj.tolerance_s = tolerance_s
+ obj.image_writer = None
+ obj.batch_encoding_size = batch_encoding_size
+ obj.episodes_since_last_encoding = 0
+
+ if image_writer_processes or image_writer_threads:
+ obj.start_image_writer(image_writer_processes, image_writer_threads)
+
+ # TODO(aliberts, rcadene, alexander-soare): Merge this with OnlineBuffer/DataBuffer
+ obj.episode_buffer = obj.create_episode_buffer()
+
+ obj.episodes = None
+ obj.hf_dataset = obj.create_hf_dataset()
+ obj.image_transforms = None
+ obj.delta_timestamps = None
+ obj.delta_indices = None
+ obj.episode_data_index = None
+ obj.video_backend = video_backend if video_backend is not None else get_safe_default_codec()
+ return obj
+
+
+class MultiLeRobotDataset(torch.utils.data.Dataset):
+ """A dataset consisting of multiple underlying `LeRobotDataset`s.
+
+ The underlying `LeRobotDataset`s are effectively concatenated, and this class adopts much of the API
+ structure of `LeRobotDataset`.
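+
+    A minimal sketch (it assumes both repositories exist on the hub and share common feature keys):
+        dataset = MultiLeRobotDataset(["lerobot/pusht", "lerobot/aloha_sim_insertion_human"])
+        item = dataset[0]  # also contains a 'dataset_index' tensor identifying the source dataset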
+ """
+
+ def __init__(
+ self,
+ repo_ids: list[str],
+ root: str | Path | None = None,
+ episodes: dict | None = None,
+ image_transforms: Callable | None = None,
+ delta_timestamps: dict[list[float]] | None = None,
+ tolerances_s: dict | None = None,
+ download_videos: bool = True,
+ video_backend: str | None = None,
+ ):
+ super().__init__()
+ self.repo_ids = repo_ids
+ self.root = Path(root) if root else HF_LEROBOT_HOME
+ self.tolerances_s = tolerances_s if tolerances_s else dict.fromkeys(repo_ids, 0.0001)
+ # Construct the underlying datasets passing everything but `transform` and `delta_timestamps` which
+ # are handled by this class.
+ self._datasets = [
+ LeRobotDataset(
+ repo_id,
+ root=self.root / repo_id,
+ episodes=episodes[repo_id] if episodes else None,
+ image_transforms=image_transforms,
+ delta_timestamps=delta_timestamps,
+ tolerance_s=self.tolerances_s[repo_id],
+ download_videos=download_videos,
+ video_backend=video_backend,
+ )
+ for repo_id in repo_ids
+ ]
+
+ # Disable any data keys that are not common across all of the datasets. Note: we may relax this
+ # restriction in future iterations of this class. For now, this is necessary at least for being able
+ # to use PyTorch's default DataLoader collate function.
+ self.disabled_features = set()
+ intersection_features = set(self._datasets[0].features)
+ for ds in self._datasets:
+ intersection_features.intersection_update(ds.features)
+ if len(intersection_features) == 0:
+ raise RuntimeError(
+ "Multiple datasets were provided but they had no keys common to all of them. "
+ "The multi-dataset functionality currently only keeps common keys."
+ )
+ for repo_id, ds in zip(self.repo_ids, self._datasets, strict=True):
+ extra_keys = set(ds.features).difference(intersection_features)
+ logging.warning(
+ f"keys {extra_keys} of {repo_id} were disabled as they are not contained in all the "
+ "other datasets."
+ )
+ self.disabled_features.update(extra_keys)
+
+ self.image_transforms = image_transforms
+ self.delta_timestamps = delta_timestamps
+ # TODO(rcadene, aliberts): We should not perform this aggregation for datasets
+ # with multiple robots of different ranges. Instead we should have one normalization
+ # per robot.
+ self.stats = aggregate_stats([dataset.meta.stats for dataset in self._datasets])
+
+ @property
+ def repo_id_to_index(self):
+ """Return a mapping from dataset repo_id to a dataset index automatically created by this class.
+
+ This index is incorporated as a data key in the dictionary returned by `__getitem__`.
+ """
+ return {repo_id: i for i, repo_id in enumerate(self.repo_ids)}
+
+ @property
+    def repo_index_to_id(self):
+        """Return the inverse mapping of repo_id_to_index."""
+        return {v: k for k, v in self.repo_id_to_index.items()}
+
+ @property
+ def fps(self) -> int:
+ """Frames per second used during data collection.
+
+        NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
+ """
+ return self._datasets[0].meta.info["fps"]
+
+ @property
+ def video(self) -> bool:
+ """Returns True if this dataset loads video frames from mp4 files.
+
+ Returns False if it only loads images from png files.
+
+        NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
+ """
+ return self._datasets[0].meta.info.get("video", False)
+
+ @property
+ def features(self) -> datasets.Features:
+ features = {}
+ for dataset in self._datasets:
+ features.update({k: v for k, v in dataset.hf_features.items() if k not in self.disabled_features})
+ return features
+
+ @property
+ def camera_keys(self) -> list[str]:
+ """Keys to access image and video stream from cameras."""
+ keys = []
+ for key, feats in self.features.items():
+ if isinstance(feats, (datasets.Image, VideoFrame)):
+ keys.append(key)
+ return keys
+
+ @property
+    def video_frame_keys(self) -> list[str]:
+        """Keys to access video frames that need to be decoded into images.
+
+ Note: It is empty if the dataset contains images only,
+        or equal to `self.camera_keys` if the dataset contains videos only,
+        or can even be a subset of `self.camera_keys` in the case of a mixed image/video dataset.
+ """
+ video_frame_keys = []
+ for key, feats in self.features.items():
+ if isinstance(feats, VideoFrame):
+ video_frame_keys.append(key)
+ return video_frame_keys
+
+ @property
+ def num_frames(self) -> int:
+ """Number of samples/frames."""
+ return sum(d.num_frames for d in self._datasets)
+
+ @property
+ def num_episodes(self) -> int:
+ """Number of episodes."""
+ return sum(d.num_episodes for d in self._datasets)
+
+ @property
+ def tolerance_s(self) -> float:
+ """Tolerance in seconds used to discard loaded frames when their timestamps
+        are not close enough to the requested frames. It is only used when `delta_timestamps`
+ is provided or when loading video frames from mp4 files.
+ """
+ # 1e-4 to account for possible numerical error
+ return 1 / self.fps - 1e-4
+
+ def __len__(self):
+ return self.num_frames
+
+ def __getitem__(self, idx: int) -> dict[str, torch.Tensor]:
+ if idx >= len(self):
+ raise IndexError(f"Index {idx} out of bounds.")
+ # Determine which dataset to get an item from based on the index.
+ start_idx = 0
+ dataset_idx = 0
+ for dataset in self._datasets:
+ if idx >= start_idx + dataset.num_frames:
+ start_idx += dataset.num_frames
+ dataset_idx += 1
+ continue
+ break
+ else:
+ raise AssertionError("We expect the loop to break out as long as the index is within bounds.")
+ item = self._datasets[dataset_idx][idx - start_idx]
+ item["dataset_index"] = torch.tensor(dataset_idx)
+ for data_key in self.disabled_features:
+ if data_key in item:
+ del item[data_key]
+
+ return item
+
+ def __repr__(self):
+ return (
+ f"{self.__class__.__name__}(\n"
+ f" Repository IDs: '{self.repo_ids}',\n"
+ f" Number of Samples: {self.num_frames},\n"
+ f" Number of Episodes: {self.num_episodes},\n"
+ f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n"
+ f" Recorded Frames per Second: {self.fps},\n"
+ f" Camera Keys: {self.camera_keys},\n"
+ f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n"
+ f" Transformations: {self.image_transforms},\n"
+ f")"
+ )
diff --git a/lerobot/common/datasets/online_buffer.py b/src/lerobot/datasets/online_buffer.py
similarity index 99%
rename from lerobot/common/datasets/online_buffer.py
rename to src/lerobot/datasets/online_buffer.py
index d907e46874..79f48f49dd 100644
--- a/lerobot/common/datasets/online_buffer.py
+++ b/src/lerobot/datasets/online_buffer.py
@@ -28,7 +28,7 @@
import numpy as np
import torch
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
def _make_memmap_safe(**kwargs) -> np.memmap:
diff --git a/src/lerobot/datasets/push_dataset_to_hub/utils.py b/src/lerobot/datasets/push_dataset_to_hub/utils.py
new file mode 100644
index 0000000000..5f6363a77b
--- /dev/null
+++ b/src/lerobot/datasets/push_dataset_to_hub/utils.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import inspect
+from concurrent.futures import ThreadPoolExecutor
+from pathlib import Path
+
+import datasets
+import numpy
+import PIL
+import torch
+
+from lerobot.datasets.video_utils import encode_video_frames
+
+
+def concatenate_episodes(ep_dicts):
+ data_dict = {}
+
+ keys = ep_dicts[0].keys()
+ for key in keys:
+ if torch.is_tensor(ep_dicts[0][key][0]):
+ data_dict[key] = torch.cat([ep_dict[key] for ep_dict in ep_dicts])
+ else:
+ if key not in data_dict:
+ data_dict[key] = []
+ for ep_dict in ep_dicts:
+ for x in ep_dict[key]:
+ data_dict[key].append(x)
+
+ total_frames = data_dict["frame_index"].shape[0]
+ data_dict["index"] = torch.arange(0, total_frames, 1)
+ return data_dict
+
+
+def save_images_concurrently(imgs_array: numpy.array, out_dir: Path, max_workers: int = 4):
+ out_dir = Path(out_dir)
+ out_dir.mkdir(parents=True, exist_ok=True)
+
+ def save_image(img_array, i, out_dir):
+ img = PIL.Image.fromarray(img_array)
+ img.save(str(out_dir / f"frame_{i:06d}.png"), quality=100)
+
+ num_images = len(imgs_array)
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
+ [executor.submit(save_image, imgs_array[i], i, out_dir) for i in range(num_images)]
+
+
+def get_default_encoding() -> dict:
+ """Returns the default ffmpeg encoding parameters used by `encode_video_frames`."""
+ signature = inspect.signature(encode_video_frames)
+ return {
+ k: v.default
+ for k, v in signature.parameters.items()
+ if v.default is not inspect.Parameter.empty and k in ["vcodec", "pix_fmt", "g", "crf"]
+ }
+
+
+def check_repo_id(repo_id: str) -> None:
+ if len(repo_id.split("/")) != 2:
+ raise ValueError(
+ f"""`repo_id` is expected to contain a community or user id `/` the name of the dataset
+ (e.g. 'lerobot/pusht'), but contains '{repo_id}'."""
+ )
+
+
+# TODO(aliberts): remove
+def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]:
+ """
+ Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset.
+
+ Parameters:
+ - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index.
+
+ Returns:
+ - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys:
+ - "from": A tensor containing the starting index of each episode.
+ - "to": A tensor containing the ending index of each episode.
+ """
+ episode_data_index = {"from": [], "to": []}
+
+ current_episode = None
+ """
+ The episode_index is a list of integers, each representing the episode index of the corresponding example.
+ For instance, the following is a valid episode_index:
+ [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]
+
+ Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and
+ ending index of each episode. For the episode_index above, the episode_data_index dictionary will look like this:
+ {
+ "from": [0, 3, 7],
+ "to": [3, 7, 12]
+ }
+ """
+ if len(hf_dataset) == 0:
+ episode_data_index = {
+ "from": torch.tensor([]),
+ "to": torch.tensor([]),
+ }
+ return episode_data_index
+ for idx, episode_idx in enumerate(hf_dataset["episode_index"]):
+ if episode_idx != current_episode:
+ # We encountered a new episode, so we append its starting location to the "from" list
+ episode_data_index["from"].append(idx)
+ # If this is not the first episode, we append the ending location of the previous episode to the "to" list
+ if current_episode is not None:
+ episode_data_index["to"].append(idx)
+ # Let's keep track of the current episode index
+ current_episode = episode_idx
+ else:
+ # We are still in the same episode, so there is nothing for us to do here
+ pass
+ # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list
+ episode_data_index["to"].append(idx + 1)
+
+ for k in ["from", "to"]:
+ episode_data_index[k] = torch.tensor(episode_data_index[k])
+
+ return episode_data_index
diff --git a/lerobot/common/datasets/sampler.py b/src/lerobot/datasets/sampler.py
similarity index 96%
rename from lerobot/common/datasets/sampler.py
rename to src/lerobot/datasets/sampler.py
index 2f6c15c150..79ac7a4b22 100644
--- a/lerobot/common/datasets/sampler.py
+++ b/src/lerobot/datasets/sampler.py
@@ -13,7 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Iterator, Union
+from collections.abc import Iterator
import torch
@@ -22,7 +22,7 @@ class EpisodeAwareSampler:
def __init__(
self,
episode_data_index: dict,
- episode_indices_to_use: Union[list, None] = None,
+ episode_indices_to_use: list | None = None,
drop_n_first_frames: int = 0,
drop_n_last_frames: int = 0,
shuffle: bool = False,
diff --git a/src/lerobot/datasets/transforms.py b/src/lerobot/datasets/transforms.py
new file mode 100644
index 0000000000..f992275b7c
--- /dev/null
+++ b/src/lerobot/datasets/transforms.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import collections
+from collections.abc import Callable, Sequence
+from dataclasses import dataclass, field
+from typing import Any
+
+import torch
+from torchvision.transforms import v2
+from torchvision.transforms.v2 import (
+ Transform,
+ functional as F, # noqa: N812
+)
+
+
+class RandomSubsetApply(Transform):
+ """Apply a random subset of N transformations from a list of transformations.
+
+ Args:
+ transforms: list of transformations.
+ p: represents the multinomial probabilities (with no replacement) used for sampling the transform.
+ If the sum of the weights is not 1, they will be normalized. If ``None`` (default), all transforms
+ have the same probability.
+ n_subset: number of transformations to apply. If ``None``, all transforms are applied.
+ Must be in [1, len(transforms)].
+ random_order: apply transformations in a random order.
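+
+    A minimal sketch (the transforms and tensor shape are illustrative):
+        >>> tfs = [v2.ColorJitter(brightness=(0.8, 1.2)), v2.ColorJitter(contrast=(0.8, 1.2))]
+        >>> subset_apply = RandomSubsetApply(tfs, n_subset=1)
+        >>> out = subset_apply(torch.rand(3, 224, 224))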
+ """
+
+ def __init__(
+ self,
+ transforms: Sequence[Callable],
+ p: list[float] | None = None,
+ n_subset: int | None = None,
+ random_order: bool = False,
+ ) -> None:
+ super().__init__()
+ if not isinstance(transforms, Sequence):
+ raise TypeError("Argument transforms should be a sequence of callables")
+ if p is None:
+ p = [1] * len(transforms)
+ elif len(p) != len(transforms):
+ raise ValueError(
+ f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}"
+ )
+
+ if n_subset is None:
+ n_subset = len(transforms)
+ elif not isinstance(n_subset, int):
+ raise TypeError("n_subset should be an int or None")
+ elif not (1 <= n_subset <= len(transforms)):
+ raise ValueError(f"n_subset should be in the interval [1, {len(transforms)}]")
+
+ self.transforms = transforms
+ total = sum(p)
+ self.p = [prob / total for prob in p]
+ self.n_subset = n_subset
+ self.random_order = random_order
+
+ self.selected_transforms = None
+
+ def forward(self, *inputs: Any) -> Any:
+ needs_unpacking = len(inputs) > 1
+
+ selected_indices = torch.multinomial(torch.tensor(self.p), self.n_subset)
+ if not self.random_order:
+ selected_indices = selected_indices.sort().values
+
+ self.selected_transforms = [self.transforms[i] for i in selected_indices]
+
+ for transform in self.selected_transforms:
+ outputs = transform(*inputs)
+ inputs = outputs if needs_unpacking else (outputs,)
+
+ return outputs
+
+ def extra_repr(self) -> str:
+ return (
+ f"transforms={self.transforms}, "
+ f"p={self.p}, "
+ f"n_subset={self.n_subset}, "
+ f"random_order={self.random_order}"
+ )
+
+
+class SharpnessJitter(Transform):
+ """Randomly change the sharpness of an image or video.
+
+ Similar to a v2.RandomAdjustSharpness with p=1 and a sharpness_factor sampled randomly.
+ While v2.RandomAdjustSharpness applies — with a given probability — a fixed sharpness_factor to an image,
+ SharpnessJitter applies a random sharpness_factor each time. This is to have a more diverse set of
+ augmentations as a result.
+
+ A sharpness_factor of 0 gives a blurred image, 1 gives the original image while 2 increases the sharpness
+ by a factor of 2.
+
+ If the input is a :class:`torch.Tensor`,
+ it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
+
+ Args:
+ sharpness: How much to jitter sharpness. sharpness_factor is chosen uniformly from
+ [max(0, 1 - sharpness), 1 + sharpness] or the given
+            [min, max]. Should be non-negative numbers.
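+
+    A minimal sketch (the tensor shape is illustrative):
+        >>> jitter = SharpnessJitter(sharpness=(0.5, 1.5))
+        >>> out = jitter(torch.rand(3, 224, 224))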
+ """
+
+ def __init__(self, sharpness: float | Sequence[float]) -> None:
+ super().__init__()
+ self.sharpness = self._check_input(sharpness)
+
+ def _check_input(self, sharpness):
+ if isinstance(sharpness, (int, float)):
+ if sharpness < 0:
+                raise ValueError("If sharpness is a single number, it must be non-negative.")
+ sharpness = [1.0 - sharpness, 1.0 + sharpness]
+ sharpness[0] = max(sharpness[0], 0.0)
+ elif isinstance(sharpness, collections.abc.Sequence) and len(sharpness) == 2:
+ sharpness = [float(v) for v in sharpness]
+ else:
+ raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.")
+
+ if not 0.0 <= sharpness[0] <= sharpness[1]:
+ raise ValueError(f"sharpness values should be between (0., inf), but got {sharpness}.")
+
+ return float(sharpness[0]), float(sharpness[1])
+
+ def make_params(self, flat_inputs: list[Any]) -> dict[str, Any]:
+ sharpness_factor = torch.empty(1).uniform_(self.sharpness[0], self.sharpness[1]).item()
+ return {"sharpness_factor": sharpness_factor}
+
+ def transform(self, inpt: Any, params: dict[str, Any]) -> Any:
+ sharpness_factor = params["sharpness_factor"]
+ return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor)
+
+
+@dataclass
+class ImageTransformConfig:
+ """
+ For each transform, the following parameters are available:
+ weight: This represents the multinomial probability (with no replacement)
+ used for sampling the transform. If the sum of the weights is not 1,
+ they will be normalized.
+ type: The name of the class used. This is either a class available under torchvision.transforms.v2 or a
+ custom transform defined here.
+ kwargs: Lower & upper bound respectively used for sampling the transform's parameter
+ (following uniform distribution) when it's applied.
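+
+    A hedged example (values are illustrative, mirroring the defaults defined below):
+        ImageTransformConfig(weight=1.0, type="ColorJitter", kwargs={"brightness": (0.8, 1.2)})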
+ """
+
+ weight: float = 1.0
+ type: str = "Identity"
+ kwargs: dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class ImageTransformsConfig:
+ """
+ These transforms are all using standard torchvision.transforms.v2
+ You can find out how these transformations affect images here:
+ https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html
+ We use a custom RandomSubsetApply container to sample them.
+ """
+
+ # Set this flag to `true` to enable transforms during training
+ enable: bool = False
+ # This is the maximum number of transforms (sampled from these below) that will be applied to each frame.
+ # It's an integer in the interval [1, number_of_available_transforms].
+ max_num_transforms: int = 3
+ # By default, transforms are applied in Torchvision's suggested order (shown below).
+ # Set this to True to apply them in a random order.
+ random_order: bool = False
+ tfs: dict[str, ImageTransformConfig] = field(
+ default_factory=lambda: {
+ "brightness": ImageTransformConfig(
+ weight=1.0,
+ type="ColorJitter",
+ kwargs={"brightness": (0.8, 1.2)},
+ ),
+ "contrast": ImageTransformConfig(
+ weight=1.0,
+ type="ColorJitter",
+ kwargs={"contrast": (0.8, 1.2)},
+ ),
+ "saturation": ImageTransformConfig(
+ weight=1.0,
+ type="ColorJitter",
+ kwargs={"saturation": (0.5, 1.5)},
+ ),
+ "hue": ImageTransformConfig(
+ weight=1.0,
+ type="ColorJitter",
+ kwargs={"hue": (-0.05, 0.05)},
+ ),
+ "sharpness": ImageTransformConfig(
+ weight=1.0,
+ type="SharpnessJitter",
+ kwargs={"sharpness": (0.5, 1.5)},
+ ),
+ }
+ )
+
+
+def make_transform_from_config(cfg: ImageTransformConfig):
+ if cfg.type == "Identity":
+ return v2.Identity(**cfg.kwargs)
+ elif cfg.type == "ColorJitter":
+ return v2.ColorJitter(**cfg.kwargs)
+ elif cfg.type == "SharpnessJitter":
+ return SharpnessJitter(**cfg.kwargs)
+ else:
+ raise ValueError(f"Transform '{cfg.type}' is not valid.")
+
+
+class ImageTransforms(Transform):
+ """A class to compose image transforms based on configuration."""
+
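+    # A minimal usage sketch (a hedged example; the input tensor shape is illustrative):
+    #   cfg = ImageTransformsConfig(enable=True)
+    #   tf = ImageTransforms(cfg)
+    #   augmented = tf(torch.rand(3, 224, 224))
+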
+ def __init__(self, cfg: ImageTransformsConfig) -> None:
+ super().__init__()
+ self._cfg = cfg
+
+ self.weights = []
+ self.transforms = {}
+ for tf_name, tf_cfg in cfg.tfs.items():
+ if tf_cfg.weight <= 0.0:
+ continue
+
+ self.transforms[tf_name] = make_transform_from_config(tf_cfg)
+ self.weights.append(tf_cfg.weight)
+
+ n_subset = min(len(self.transforms), cfg.max_num_transforms)
+ if n_subset == 0 or not cfg.enable:
+ self.tf = v2.Identity()
+ else:
+ self.tf = RandomSubsetApply(
+ transforms=list(self.transforms.values()),
+ p=self.weights,
+ n_subset=n_subset,
+ random_order=cfg.random_order,
+ )
+
+ def forward(self, *inputs: Any) -> Any:
+ return self.tf(*inputs)
diff --git a/src/lerobot/datasets/utils.py b/src/lerobot/datasets/utils.py
new file mode 100644
index 0000000000..ac0ab97999
--- /dev/null
+++ b/src/lerobot/datasets/utils.py
@@ -0,0 +1,860 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import contextlib
+import importlib.resources
+import json
+import logging
+from collections.abc import Iterator
+from itertools import accumulate
+from pathlib import Path
+from pprint import pformat
+from types import SimpleNamespace
+from typing import Any
+
+import datasets
+import jsonlines
+import numpy as np
+import packaging.version
+import torch
+from datasets.table import embed_table_storage
+from huggingface_hub import DatasetCard, DatasetCardData, HfApi
+from huggingface_hub.errors import RevisionNotFoundError
+from PIL import Image as PILImage
+from torchvision import transforms
+
+from lerobot.configs.types import DictLike, FeatureType, PolicyFeature
+from lerobot.datasets.backward_compatibility import (
+ V21_MESSAGE,
+ BackwardCompatibilityError,
+ ForwardCompatibilityError,
+)
+from lerobot.robots import Robot
+from lerobot.utils.utils import is_valid_numpy_dtype_string
+
+DEFAULT_CHUNK_SIZE = 1000 # Max number of episodes per chunk
+
+INFO_PATH = "meta/info.json"
+EPISODES_PATH = "meta/episodes.jsonl"
+STATS_PATH = "meta/stats.json"
+EPISODES_STATS_PATH = "meta/episodes_stats.jsonl"
+TASKS_PATH = "meta/tasks.jsonl"
+
+DEFAULT_VIDEO_PATH = "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4"
+DEFAULT_PARQUET_PATH = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet"
+DEFAULT_IMAGE_PATH = "images/{image_key}/episode_{episode_index:06d}/frame_{frame_index:06d}.png"
+
+DATASET_CARD_TEMPLATE = """
+---
+# Metadata will go there
+---
+This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
+
+## {}
+
+"""
+
+DEFAULT_FEATURES = {
+ "timestamp": {"dtype": "float32", "shape": (1,), "names": None},
+ "frame_index": {"dtype": "int64", "shape": (1,), "names": None},
+ "episode_index": {"dtype": "int64", "shape": (1,), "names": None},
+ "index": {"dtype": "int64", "shape": (1,), "names": None},
+ "task_index": {"dtype": "int64", "shape": (1,), "names": None},
+}
+
+
+def flatten_dict(d: dict, parent_key: str = "", sep: str = "/") -> dict:
+ """Flatten a nested dictionary structure by collapsing nested keys into one key with a separator.
+
+ For example:
+ ```
+ >>> dct = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}`
+ >>> print(flatten_dict(dct))
+ {"a/b": 1, "a/c/d": 2, "e": 3}
+ """
+ items = []
+ for k, v in d.items():
+ new_key = f"{parent_key}{sep}{k}" if parent_key else k
+ if isinstance(v, dict):
+ items.extend(flatten_dict(v, new_key, sep=sep).items())
+ else:
+ items.append((new_key, v))
+ return dict(items)
+
+
+def unflatten_dict(d: dict, sep: str = "/") -> dict:
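+    """Inverse of `flatten_dict`: rebuild a nested dictionary from keys flattened with `sep`.
+
+    A minimal sketch:
+        >>> unflatten_dict({"a/b": 1, "a/c/d": 2, "e": 3})
+        {'a': {'b': 1, 'c': {'d': 2}}, 'e': 3}
+    """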
+ outdict = {}
+ for key, value in d.items():
+ parts = key.split(sep)
+ d = outdict
+ for part in parts[:-1]:
+ if part not in d:
+ d[part] = {}
+ d = d[part]
+ d[parts[-1]] = value
+ return outdict
+
+
+def get_nested_item(obj: DictLike, flattened_key: str, sep: str = "/") -> Any:
+ split_keys = flattened_key.split(sep)
+ getter = obj[split_keys[0]]
+ if len(split_keys) == 1:
+ return getter
+
+ for key in split_keys[1:]:
+ getter = getter[key]
+
+ return getter
+
+
+def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict:
+ serialized_dict = {}
+ for key, value in flatten_dict(stats).items():
+ if isinstance(value, (torch.Tensor, np.ndarray)):
+ serialized_dict[key] = value.tolist()
+ elif isinstance(value, np.generic):
+ serialized_dict[key] = value.item()
+ elif isinstance(value, (int, float)):
+ serialized_dict[key] = value
+ else:
+ raise NotImplementedError(f"The value '{value}' of type '{type(value)}' is not supported.")
+ return unflatten_dict(serialized_dict)
+
+
+def embed_images(dataset: datasets.Dataset) -> datasets.Dataset:
+ # Embed image bytes into the table before saving to parquet
+ format = dataset.format
+ dataset = dataset.with_format("arrow")
+ dataset = dataset.map(embed_table_storage, batched=False)
+ dataset = dataset.with_format(**format)
+ return dataset
+
+
+def load_json(fpath: Path) -> Any:
+ with open(fpath) as f:
+ return json.load(f)
+
+
+def write_json(data: dict, fpath: Path) -> None:
+ fpath.parent.mkdir(exist_ok=True, parents=True)
+ with open(fpath, "w") as f:
+ json.dump(data, f, indent=4, ensure_ascii=False)
+
+
+def load_jsonlines(fpath: Path) -> list[Any]:
+ with jsonlines.open(fpath, "r") as reader:
+ return list(reader)
+
+
+def write_jsonlines(data: dict, fpath: Path) -> None:
+ fpath.parent.mkdir(exist_ok=True, parents=True)
+ with jsonlines.open(fpath, "w") as writer:
+ writer.write_all(data)
+
+
+def append_jsonlines(data: dict, fpath: Path) -> None:
+ fpath.parent.mkdir(exist_ok=True, parents=True)
+ with jsonlines.open(fpath, "a") as writer:
+ writer.write(data)
+
+
+def write_info(info: dict, local_dir: Path):
+ write_json(info, local_dir / INFO_PATH)
+
+
+def load_info(local_dir: Path) -> dict:
+ info = load_json(local_dir / INFO_PATH)
+ for ft in info["features"].values():
+ ft["shape"] = tuple(ft["shape"])
+ return info
+
+
+def write_stats(stats: dict, local_dir: Path):
+ serialized_stats = serialize_dict(stats)
+ write_json(serialized_stats, local_dir / STATS_PATH)
+
+
+def cast_stats_to_numpy(stats) -> dict[str, dict[str, np.ndarray]]:
+ stats = {key: np.array(value) for key, value in flatten_dict(stats).items()}
+ return unflatten_dict(stats)
+
+
+def load_stats(local_dir: Path) -> dict[str, dict[str, np.ndarray]]:
+ if not (local_dir / STATS_PATH).exists():
+ return None
+ stats = load_json(local_dir / STATS_PATH)
+ return cast_stats_to_numpy(stats)
+
+
+def write_task(task_index: int, task: dict, local_dir: Path):
+ task_dict = {
+ "task_index": task_index,
+ "task": task,
+ }
+ append_jsonlines(task_dict, local_dir / TASKS_PATH)
+
+
+def load_tasks(local_dir: Path) -> tuple[dict, dict]:
+ tasks = load_jsonlines(local_dir / TASKS_PATH)
+ tasks = {item["task_index"]: item["task"] for item in sorted(tasks, key=lambda x: x["task_index"])}
+ task_to_task_index = {task: task_index for task_index, task in tasks.items()}
+ return tasks, task_to_task_index
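+
+# Each line appended by `write_task` is a JSON object such as (illustrative content):
+#   {"task_index": 0, "task": "Pick up the cube and place it in the bin."}
+# `load_tasks` then returns ({0: "Pick up the cube ..."}, {"Pick up the cube ...": 0}).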
+
+
+def write_episode(episode: dict, local_dir: Path):
+ append_jsonlines(episode, local_dir / EPISODES_PATH)
+
+
+def load_episodes(local_dir: Path) -> dict:
+ episodes = load_jsonlines(local_dir / EPISODES_PATH)
+ return {item["episode_index"]: item for item in sorted(episodes, key=lambda x: x["episode_index"])}
+
+
+def write_episode_stats(episode_index: int, episode_stats: dict, local_dir: Path):
+    # Wrap episode_stats in a dict with an explicit "episode_index" key: episode_stats itself
+    # maps feature names to stats dicts, so the episode index cannot simply be stored inside it.
+ episode_stats = {"episode_index": episode_index, "stats": serialize_dict(episode_stats)}
+ append_jsonlines(episode_stats, local_dir / EPISODES_STATS_PATH)
+
+
+def load_episodes_stats(local_dir: Path) -> dict:
+ episodes_stats = load_jsonlines(local_dir / EPISODES_STATS_PATH)
+ return {
+ item["episode_index"]: cast_stats_to_numpy(item["stats"])
+ for item in sorted(episodes_stats, key=lambda x: x["episode_index"])
+ }
+
+
+def backward_compatible_episodes_stats(
+ stats: dict[str, dict[str, np.ndarray]], episodes: list[int]
+) -> dict[str, dict[str, np.ndarray]]:
+ return dict.fromkeys(episodes, stats)
+
+
+def load_image_as_numpy(
+ fpath: str | Path, dtype: np.dtype = np.float32, channel_first: bool = True
+) -> np.ndarray:
+ img = PILImage.open(fpath).convert("RGB")
+ img_array = np.array(img, dtype=dtype)
+ if channel_first: # (H, W, C) -> (C, H, W)
+ img_array = np.transpose(img_array, (2, 0, 1))
+ if np.issubdtype(dtype, np.floating):
+ img_array /= 255.0
+ return img_array
+
+
+def hf_transform_to_torch(items_dict: dict[str, list]):
+    """Transform function that converts items from a Hugging Face dataset (pyarrow)
+    to torch tensors. Importantly, images are converted from PIL, which corresponds to
+    a channel-last representation (h w c) of uint8 type, to a torch image representation
+    with channel first (c h w) of float32 type in range [0,1].
+    """
+ for key in items_dict:
+ first_item = items_dict[key][0]
+ if isinstance(first_item, PILImage.Image):
+ to_tensor = transforms.ToTensor()
+ items_dict[key] = [to_tensor(img) for img in items_dict[key]]
+ elif first_item is None:
+ pass
+ else:
+ items_dict[key] = [x if isinstance(x, str) else torch.tensor(x) for x in items_dict[key]]
+ return items_dict
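+
+# Typical usage (sketch): set as a lazy format transform on the underlying Hugging Face dataset,
+# e.g. `hf_dataset.set_transform(hf_transform_to_torch)`, so items are converted to torch tensors
+# only when they are accessed.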
+
+
+def is_valid_version(version: str) -> bool:
+ try:
+ packaging.version.parse(version)
+ return True
+ except packaging.version.InvalidVersion:
+ return False
+
+
+def check_version_compatibility(
+ repo_id: str,
+ version_to_check: str | packaging.version.Version,
+ current_version: str | packaging.version.Version,
+ enforce_breaking_major: bool = True,
+) -> None:
+ v_check = (
+ packaging.version.parse(version_to_check)
+ if not isinstance(version_to_check, packaging.version.Version)
+ else version_to_check
+ )
+ v_current = (
+ packaging.version.parse(current_version)
+ if not isinstance(current_version, packaging.version.Version)
+ else current_version
+ )
+ if v_check.major < v_current.major and enforce_breaking_major:
+ raise BackwardCompatibilityError(repo_id, v_check)
+ elif v_check.minor < v_current.minor:
+ logging.warning(V21_MESSAGE.format(repo_id=repo_id, version=v_check))
+
+
+def get_repo_versions(repo_id: str) -> list[packaging.version.Version]:
+    """Returns available valid versions (branches and tags) on the given repo."""
+ api = HfApi()
+ repo_refs = api.list_repo_refs(repo_id, repo_type="dataset")
+ repo_refs = [b.name for b in repo_refs.branches + repo_refs.tags]
+ repo_versions = []
+ for ref in repo_refs:
+ with contextlib.suppress(packaging.version.InvalidVersion):
+ repo_versions.append(packaging.version.parse(ref))
+
+ return repo_versions
+
+
+def get_safe_version(repo_id: str, version: str | packaging.version.Version) -> str:
+ """
+    Returns the version if it is available on the repo, or the latest compatible one otherwise.
+    If no compatible version exists, raises a `BackwardCompatibilityError` or `ForwardCompatibilityError`.
+ """
+ target_version = (
+ packaging.version.parse(version) if not isinstance(version, packaging.version.Version) else version
+ )
+ hub_versions = get_repo_versions(repo_id)
+
+ if not hub_versions:
+ raise RevisionNotFoundError(
+ f"""Your dataset must be tagged with a codebase version.
+ Assuming _version_ is the codebase_version value in the info.json, you can run this:
+ ```python
+ from huggingface_hub import HfApi
+
+ hub_api = HfApi()
+ hub_api.create_tag("{repo_id}", tag="_version_", repo_type="dataset")
+ ```
+ """
+ )
+
+ if target_version in hub_versions:
+ return f"v{target_version}"
+
+ compatibles = [
+ v for v in hub_versions if v.major == target_version.major and v.minor <= target_version.minor
+ ]
+ if compatibles:
+ return_version = max(compatibles)
+ if return_version < target_version:
+ logging.warning(f"Revision {version} for {repo_id} not found, using version v{return_version}")
+ return f"v{return_version}"
+
+ lower_major = [v for v in hub_versions if v.major < target_version.major]
+ if lower_major:
+ raise BackwardCompatibilityError(repo_id, max(lower_major))
+
+ upper_versions = [v for v in hub_versions if v > target_version]
+ assert len(upper_versions) > 0
+ raise ForwardCompatibilityError(repo_id, min(upper_versions))
+
+
+def get_hf_features_from_features(features: dict) -> datasets.Features:
+ hf_features = {}
+ for key, ft in features.items():
+ if ft["dtype"] == "video":
+ continue
+ elif ft["dtype"] == "image":
+ hf_features[key] = datasets.Image()
+ elif ft["shape"] == (1,):
+ hf_features[key] = datasets.Value(dtype=ft["dtype"])
+ elif len(ft["shape"]) == 1:
+ hf_features[key] = datasets.Sequence(
+ length=ft["shape"][0], feature=datasets.Value(dtype=ft["dtype"])
+ )
+ elif len(ft["shape"]) == 2:
+ hf_features[key] = datasets.Array2D(shape=ft["shape"], dtype=ft["dtype"])
+ elif len(ft["shape"]) == 3:
+ hf_features[key] = datasets.Array3D(shape=ft["shape"], dtype=ft["dtype"])
+ elif len(ft["shape"]) == 4:
+ hf_features[key] = datasets.Array4D(shape=ft["shape"], dtype=ft["dtype"])
+ elif len(ft["shape"]) == 5:
+ hf_features[key] = datasets.Array5D(shape=ft["shape"], dtype=ft["dtype"])
+ else:
+ raise ValueError(f"Corresponding feature is not valid: {ft}")
+
+ return datasets.Features(hf_features)
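+
+# Illustrative mapping (the feature spec values are assumptions): a feature declared as
+# {"dtype": "float32", "shape": (6,), "names": [...]} becomes
+# datasets.Sequence(length=6, feature=datasets.Value("float32")), an {"dtype": "image", ...}
+# feature becomes datasets.Image(), and "video" features are skipped here since they are decoded
+# from video files rather than stored in the table.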
+
+
+def _validate_feature_names(features: dict[str, dict]) -> None:
+ invalid_features = {name: ft for name, ft in features.items() if "/" in name}
+ if invalid_features:
+ raise ValueError(f"Feature names should not contain '/'. Found '/' in '{invalid_features}'.")
+
+
+def hw_to_dataset_features(
+ hw_features: dict[str, type | tuple], prefix: str, use_video: bool = True
+) -> dict[str, dict]:
+ features = {}
+ joint_fts = {key: ftype for key, ftype in hw_features.items() if ftype is float}
+ cam_fts = {key: shape for key, shape in hw_features.items() if isinstance(shape, tuple)}
+
+ if joint_fts and prefix == "action":
+ features[prefix] = {
+ "dtype": "float32",
+ "shape": (len(joint_fts),),
+ "names": list(joint_fts),
+ }
+
+ if joint_fts and prefix == "observation":
+ features[f"{prefix}.state"] = {
+ "dtype": "float32",
+ "shape": (len(joint_fts),),
+ "names": list(joint_fts),
+ }
+
+ for key, shape in cam_fts.items():
+ features[f"{prefix}.images.{key}"] = {
+ "dtype": "video" if use_video else "image",
+ "shape": shape,
+ "names": ["height", "width", "channels"],
+ }
+
+ _validate_feature_names(features)
+ return features
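+
+# Sketch with hypothetical hardware features (the key names are illustrative), using the default
+# use_video=True:
+# hw_to_dataset_features({"shoulder_pan.pos": float, "wrist": (480, 640, 3)}, prefix="observation")
+# -> {"observation.state":        {"dtype": "float32", "shape": (1,), "names": ["shoulder_pan.pos"]},
+#     "observation.images.wrist": {"dtype": "video", "shape": (480, 640, 3), "names": ["height", "width", "channels"]}}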
+
+
+def build_dataset_frame(
+ ds_features: dict[str, dict], values: dict[str, Any], prefix: str
+) -> dict[str, np.ndarray]:
+ frame = {}
+ for key, ft in ds_features.items():
+ if key in DEFAULT_FEATURES or not key.startswith(prefix):
+ continue
+ elif ft["dtype"] == "float32" and len(ft["shape"]) == 1:
+ frame[key] = np.array([values[name] for name in ft["names"]], dtype=np.float32)
+ elif ft["dtype"] in ["image", "video"]:
+ frame[key] = values[key.removeprefix(f"{prefix}.images.")]
+
+ return frame
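+
+# Sketch (hypothetical feature names): with prefix="observation", an "observation.state" entry is
+# rebuilt as np.array([values[name] for name in its "names"]), while "observation.images.wrist"
+# is filled from values["wrist"], i.e. the camera key with the "observation.images." prefix stripped.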
+
+
+def get_features_from_robot(robot: Robot, use_videos: bool = True) -> dict:
+ camera_ft = {}
+ if robot.cameras:
+ camera_ft = {
+ key: {"dtype": "video" if use_videos else "image", **ft}
+ for key, ft in robot.camera_features.items()
+ }
+ return {**robot.motor_features, **camera_ft, **DEFAULT_FEATURES}
+
+
+def dataset_to_policy_features(features: dict[str, dict]) -> dict[str, PolicyFeature]:
+ # TODO(aliberts): Implement "type" in dataset features and simplify this
+ policy_features = {}
+ for key, ft in features.items():
+ shape = ft["shape"]
+ if ft["dtype"] in ["image", "video"]:
+ type = FeatureType.VISUAL
+ if len(shape) != 3:
+ raise ValueError(f"Number of dimensions of {key} != 3 (shape={shape})")
+
+ names = ft["names"]
+            # Backward compatibility for the name "channel", an error introduced in LeRobotDataset v2.0 for ported datasets.
+ if names[2] in ["channel", "channels"]: # (h, w, c) -> (c, h, w)
+ shape = (shape[2], shape[0], shape[1])
+ elif key == "observation.environment_state":
+ type = FeatureType.ENV
+ elif key.startswith("observation"):
+ type = FeatureType.STATE
+ elif key.startswith("action"):
+ type = FeatureType.ACTION
+ else:
+ continue
+
+ policy_features[key] = PolicyFeature(
+ type=type,
+ shape=shape,
+ )
+
+ return policy_features
+
+
+def create_empty_dataset_info(
+ codebase_version: str,
+ fps: int,
+ features: dict,
+ use_videos: bool,
+ robot_type: str | None = None,
+) -> dict:
+ return {
+ "codebase_version": codebase_version,
+ "robot_type": robot_type,
+ "total_episodes": 0,
+ "total_frames": 0,
+ "total_tasks": 0,
+ "total_videos": 0,
+ "total_chunks": 0,
+ "chunks_size": DEFAULT_CHUNK_SIZE,
+ "fps": fps,
+ "splits": {},
+ "data_path": DEFAULT_PARQUET_PATH,
+ "video_path": DEFAULT_VIDEO_PATH if use_videos else None,
+ "features": features,
+ }
+
+
+def get_episode_data_index(
+    episode_dicts: dict[int, dict], episodes: list[int] | None = None
+) -> dict[str, torch.Tensor]:
+ episode_lengths = {ep_idx: ep_dict["length"] for ep_idx, ep_dict in episode_dicts.items()}
+ if episodes is not None:
+ episode_lengths = {ep_idx: episode_lengths[ep_idx] for ep_idx in episodes}
+
+ cumulative_lengths = list(accumulate(episode_lengths.values()))
+ return {
+ "from": torch.LongTensor([0] + cumulative_lengths[:-1]),
+ "to": torch.LongTensor(cumulative_lengths),
+ }
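+
+# Worked example (illustrative): with episode lengths {0: 10, 1: 5, 2: 7}, the cumulative sums
+# are [10, 15, 22], so the returned index is {"from": [0, 10, 15], "to": [10, 15, 22]},
+# i.e. episode i spans rows from[i]:to[i] in the concatenated dataset.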
+
+
+def check_timestamps_sync(
+ timestamps: np.ndarray,
+ episode_indices: np.ndarray,
+ episode_data_index: dict[str, np.ndarray],
+ fps: int,
+ tolerance_s: float,
+ raise_value_error: bool = True,
+) -> bool:
+ """
+    Checks that, within each episode, consecutive timestamps are separated by (1/fps) +/- tolerance,
+    where the tolerance accounts for possible numerical error.
+
+ Args:
+ timestamps (np.ndarray): Array of timestamps in seconds.
+ episode_indices (np.ndarray): Array indicating the episode index for each timestamp.
+ episode_data_index (dict[str, np.ndarray]): A dictionary that includes 'to',
+ which identifies indices for the end of each episode.
+ fps (int): Frames per second. Used to check the expected difference between consecutive timestamps.
+ tolerance_s (float): Allowed deviation from the expected (1/fps) difference.
+ raise_value_error (bool): Whether to raise a ValueError if the check fails.
+
+ Returns:
+ bool: True if all checked timestamp differences lie within tolerance, False otherwise.
+
+ Raises:
+ ValueError: If the check fails and `raise_value_error` is True.
+ """
+ if timestamps.shape != episode_indices.shape:
+ raise ValueError(
+ "timestamps and episode_indices should have the same shape. "
+ f"Found {timestamps.shape=} and {episode_indices.shape=}."
+ )
+
+ # Consecutive differences
+ diffs = np.diff(timestamps)
+ within_tolerance = np.abs(diffs - (1.0 / fps)) <= tolerance_s
+
+ # Mask to ignore differences at the boundaries between episodes
+ mask = np.ones(len(diffs), dtype=bool)
+    ignored_diffs = episode_data_index["to"][:-1] - 1  # diffs at the last frame of each episode, which span episode boundaries
+ mask[ignored_diffs] = False
+ filtered_within_tolerance = within_tolerance[mask]
+
+ # Check if all remaining diffs are within tolerance
+ if not np.all(filtered_within_tolerance):
+ # Track original indices before masking
+ original_indices = np.arange(len(diffs))
+ filtered_indices = original_indices[mask]
+ outside_tolerance_filtered_indices = np.nonzero(~filtered_within_tolerance)[0]
+ outside_tolerance_indices = filtered_indices[outside_tolerance_filtered_indices]
+
+ outside_tolerances = []
+ for idx in outside_tolerance_indices:
+ entry = {
+ "timestamps": [timestamps[idx], timestamps[idx + 1]],
+ "diff": diffs[idx],
+ "episode_index": episode_indices[idx].item()
+ if hasattr(episode_indices[idx], "item")
+ else episode_indices[idx],
+ }
+ outside_tolerances.append(entry)
+
+ if raise_value_error:
+ raise ValueError(
+ f"""One or several timestamps unexpectedly violate the tolerance inside episode range.
+ This might be due to synchronization issues during data collection.
+ \n{pformat(outside_tolerances)}"""
+ )
+ return False
+
+ return True
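+
+# Worked example (illustrative): at fps=30 each within-episode diff should be ~1/30 s (0.0333...).
+# With tolerance_s=1e-4, a diff of 0.0334 passes while a diff of 0.05 is reported as a violation,
+# unless it falls on an episode boundary, which is masked out above.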
+
+
+def check_delta_timestamps(
+ delta_timestamps: dict[str, list[float]], fps: int, tolerance_s: float, raise_value_error: bool = True
+) -> bool:
+    """Checks that all the values in delta_timestamps are multiples of 1/fps +/- tolerance.
+    This ensures that these delta_timestamps, added to any timestamp from the dataset, land on
+    actual timestamps from the dataset.
+ """
+ outside_tolerance = {}
+ for key, delta_ts in delta_timestamps.items():
+ within_tolerance = [abs(ts * fps - round(ts * fps)) / fps <= tolerance_s for ts in delta_ts]
+ if not all(within_tolerance):
+ outside_tolerance[key] = [
+ ts for ts, is_within in zip(delta_ts, within_tolerance, strict=True) if not is_within
+ ]
+
+ if len(outside_tolerance) > 0:
+ if raise_value_error:
+ raise ValueError(
+ f"""
+ The following delta_timestamps are found outside of tolerance range.
+ Please make sure they are multiples of 1/{fps} +/- tolerance and adjust
+ their values accordingly.
+ \n{pformat(outside_tolerance)}
+ """
+ )
+ return False
+
+ return True
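+
+# Worked example (illustrative): at fps=30, delta_timestamps={"action": [-1/30, 0.0, 1/30]} passes,
+# whereas a value of 0.04 fails for any reasonably small tolerance, since 0.04 * 30 = 1.2 is
+# 0.2 frames (~6.7 ms) away from an integer number of frames.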
+
+
+def get_delta_indices(delta_timestamps: dict[str, list[float]], fps: int) -> dict[str, list[int]]:
+ delta_indices = {}
+ for key, delta_ts in delta_timestamps.items():
+ delta_indices[key] = [round(d * fps) for d in delta_ts]
+
+ return delta_indices
+
+
+def cycle(iterable):
+    """The equivalent of itertools.cycle, but safe for PyTorch dataloaders.
+
+ See https://github.com/pytorch/pytorch/issues/23900 for information on why itertools.cycle is not safe.
+ """
+ iterator = iter(iterable)
+ while True:
+ try:
+ yield next(iterator)
+ except StopIteration:
+ iterator = iter(iterable)
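+
+# Sketch: wrap a DataLoader so a training loop can call `next()` indefinitely, e.g.
+# `dl_iter = cycle(dataloader)` then `batch = next(dl_iter)` at each step (names are illustrative).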
+
+
+def create_branch(repo_id, *, branch: str, repo_type: str | None = None) -> None:
+    """Create a branch on an existing Hugging Face repo. Delete the branch if it already
+    exists before creating it.
+ """
+ api = HfApi()
+
+ branches = api.list_repo_refs(repo_id, repo_type=repo_type).branches
+ refs = [branch.ref for branch in branches]
+ ref = f"refs/heads/{branch}"
+ if ref in refs:
+ api.delete_branch(repo_id, repo_type=repo_type, branch=branch)
+
+ api.create_branch(repo_id, repo_type=repo_type, branch=branch)
+
+
+def create_lerobot_dataset_card(
+ tags: list | None = None,
+ dataset_info: dict | None = None,
+ **kwargs,
+) -> DatasetCard:
+ """
+ Keyword arguments will be used to replace values in src/lerobot/datasets/card_template.md.
+ Note: If specified, license must be one of https://huggingface.co/docs/hub/repositories-licenses.
+ """
+ card_tags = ["LeRobot"]
+
+ if tags:
+ card_tags += tags
+ if dataset_info:
+ dataset_structure = "[meta/info.json](meta/info.json):\n"
+ dataset_structure += f"```json\n{json.dumps(dataset_info, indent=4)}\n```\n"
+ kwargs = {**kwargs, "dataset_structure": dataset_structure}
+ card_data = DatasetCardData(
+ license=kwargs.get("license"),
+ tags=card_tags,
+ task_categories=["robotics"],
+ configs=[
+ {
+ "config_name": "default",
+ "data_files": "data/*/*.parquet",
+ }
+ ],
+ )
+
+ card_template = (importlib.resources.files("lerobot.datasets") / "card_template.md").read_text()
+
+ return DatasetCard.from_template(
+ card_data=card_data,
+ template_str=card_template,
+ **kwargs,
+ )
+
+
+class IterableNamespace(SimpleNamespace):
+ """
+ A namespace object that supports both dictionary-like iteration and dot notation access.
+ Automatically converts nested dictionaries into IterableNamespaces.
+
+ This class extends SimpleNamespace to provide:
+ - Dictionary-style iteration over keys
+ - Access to items via both dot notation (obj.key) and brackets (obj["key"])
+ - Dictionary-like methods: items(), keys(), values()
+ - Recursive conversion of nested dictionaries
+
+ Args:
+ dictionary: Optional dictionary to initialize the namespace
+ **kwargs: Additional keyword arguments passed to SimpleNamespace
+
+ Examples:
+ >>> data = {"name": "Alice", "details": {"age": 25}}
+ >>> ns = IterableNamespace(data)
+ >>> ns.name
+ 'Alice'
+ >>> ns.details.age
+ 25
+ >>> list(ns.keys())
+ ['name', 'details']
+ >>> for key, value in ns.items():
+ ... print(f"{key}: {value}")
+ name: Alice
+ details: IterableNamespace(age=25)
+ """
+
+ def __init__(self, dictionary: dict[str, Any] = None, **kwargs):
+ super().__init__(**kwargs)
+ if dictionary is not None:
+ for key, value in dictionary.items():
+ if isinstance(value, dict):
+ setattr(self, key, IterableNamespace(value))
+ else:
+ setattr(self, key, value)
+
+ def __iter__(self) -> Iterator[str]:
+ return iter(vars(self))
+
+ def __getitem__(self, key: str) -> Any:
+ return vars(self)[key]
+
+ def items(self):
+ return vars(self).items()
+
+ def values(self):
+ return vars(self).values()
+
+ def keys(self):
+ return vars(self).keys()
+
+
+def validate_frame(frame: dict, features: dict):
+ expected_features = set(features) - set(DEFAULT_FEATURES)
+ actual_features = set(frame)
+
+ error_message = validate_features_presence(actual_features, expected_features)
+
+ common_features = actual_features & expected_features
+ for name in common_features - {"task"}:
+ error_message += validate_feature_dtype_and_shape(name, features[name], frame[name])
+
+ if error_message:
+ raise ValueError(error_message)
+
+
+def validate_features_presence(actual_features: set[str], expected_features: set[str]):
+ error_message = ""
+ missing_features = expected_features - actual_features
+ extra_features = actual_features - expected_features
+
+ if missing_features or extra_features:
+ error_message += "Feature mismatch in `frame` dictionary:\n"
+ if missing_features:
+ error_message += f"Missing features: {missing_features}\n"
+ if extra_features:
+ error_message += f"Extra features: {extra_features}\n"
+
+ return error_message
+
+
+def validate_feature_dtype_and_shape(name: str, feature: dict, value: np.ndarray | PILImage.Image | str):
+ expected_dtype = feature["dtype"]
+ expected_shape = feature["shape"]
+ if is_valid_numpy_dtype_string(expected_dtype):
+ return validate_feature_numpy_array(name, expected_dtype, expected_shape, value)
+ elif expected_dtype in ["image", "video"]:
+ return validate_feature_image_or_video(name, expected_shape, value)
+ elif expected_dtype == "string":
+ return validate_feature_string(name, value)
+ else:
+ raise NotImplementedError(f"The feature dtype '{expected_dtype}' is not implemented yet.")
+
+
+def validate_feature_numpy_array(
+ name: str, expected_dtype: str, expected_shape: list[int], value: np.ndarray
+):
+ error_message = ""
+ if isinstance(value, np.ndarray):
+ actual_dtype = value.dtype
+ actual_shape = value.shape
+
+ if actual_dtype != np.dtype(expected_dtype):
+ error_message += f"The feature '{name}' of dtype '{actual_dtype}' is not of the expected dtype '{expected_dtype}'.\n"
+
+ if actual_shape != expected_shape:
+ error_message += f"The feature '{name}' of shape '{actual_shape}' does not have the expected shape '{expected_shape}'.\n"
+ else:
+ error_message += f"The feature '{name}' is not a 'np.ndarray'. Expected type is '{expected_dtype}', but type '{type(value)}' provided instead.\n"
+
+ return error_message
+
+
+def validate_feature_image_or_video(name: str, expected_shape: list[int], value: np.ndarray | PILImage.Image):
+    # Note: The check of the pixel range ([0,1] for float and [0,255] for uint8) is done by the image writer threads.
+ error_message = ""
+ if isinstance(value, np.ndarray):
+ actual_shape = value.shape
+ c, h, w = expected_shape
+ if len(actual_shape) != 3 or (actual_shape != (c, h, w) and actual_shape != (h, w, c)):
+ error_message += f"The feature '{name}' of shape '{actual_shape}' does not have the expected shape '{(c, h, w)}' or '{(h, w, c)}'.\n"
+ elif isinstance(value, PILImage.Image):
+ pass
+ else:
+ error_message += f"The feature '{name}' is expected to be of type 'PIL.Image' or 'np.ndarray' channel first or channel last, but type '{type(value)}' provided instead.\n"
+
+ return error_message
+
+
+def validate_feature_string(name: str, value: str):
+ if not isinstance(value, str):
+ return f"The feature '{name}' is expected to be of type 'str', but type '{type(value)}' provided instead.\n"
+ return ""
+
+
+def validate_episode_buffer(episode_buffer: dict, total_episodes: int, features: dict):
+ if "size" not in episode_buffer:
+ raise ValueError("size key not found in episode_buffer")
+
+ if "task" not in episode_buffer:
+ raise ValueError("task key not found in episode_buffer")
+
+ if episode_buffer["episode_index"] != total_episodes:
+ # TODO(aliberts): Add option to use existing episode_index
+ raise NotImplementedError(
+ "You might have manually provided the episode_buffer with an episode_index that doesn't "
+ "match the total number of episodes already in the dataset. This is not supported for now."
+ )
+
+ if episode_buffer["size"] == 0:
+ raise ValueError("You must add one or several frames with `add_frame` before calling `add_episode`.")
+
+    buffer_keys = set(episode_buffer.keys()) - {"task", "size"}
+    if buffer_keys != set(features):
+        raise ValueError(
+            f"Features from `episode_buffer` don't match the ones in `features`. "
+            f"In episode_buffer not in features: {buffer_keys - set(features)}. "
+            f"In features not in episode_buffer: {set(features) - buffer_keys}"
+        )
diff --git a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py b/src/lerobot/datasets/v2/batch_convert_dataset_v1_to_v2.py
similarity index 93%
rename from lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py
rename to src/lerobot/datasets/v2/batch_convert_dataset_v1_to_v2.py
index eeeb8fe7a9..fa99c725e4 100644
--- a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py
+++ b/src/lerobot/datasets/v2/batch_convert_dataset_v1_to_v2.py
@@ -26,16 +26,17 @@
from textwrap import dedent
from lerobot import available_datasets
-from lerobot.common.datasets.v2.convert_dataset_v1_to_v2 import convert_dataset, parse_robot_config
+from lerobot.datasets.v2.convert_dataset_v1_to_v2 import convert_dataset
+from lerobot.robots.aloha.configuration_aloha import AlohaRobotConfig
LOCAL_DIR = Path("data/")
-ALOHA_CONFIG = Path("lerobot/configs/robot/aloha.yaml")
+# spellchecker:off
ALOHA_MOBILE_INFO = {
- "robot_config": parse_robot_config(ALOHA_CONFIG),
+ "robot_config": AlohaRobotConfig(),
"license": "mit",
"url": "https://mobile-aloha.github.io/",
- "paper": "https://arxiv.org/abs/2401.02117",
+ "paper": "https://huggingface.co/papers/2401.02117",
"citation_bibtex": dedent(r"""
@inproceedings{fu2024mobile,
author = {Fu, Zipeng and Zhao, Tony Z. and Finn, Chelsea},
@@ -45,10 +46,10 @@
}""").lstrip(),
}
ALOHA_STATIC_INFO = {
- "robot_config": parse_robot_config(ALOHA_CONFIG),
+ "robot_config": AlohaRobotConfig(),
"license": "mit",
"url": "https://tonyzhaozh.github.io/aloha/",
- "paper": "https://arxiv.org/abs/2304.13705",
+ "paper": "https://huggingface.co/papers/2304.13705",
"citation_bibtex": dedent(r"""
@article{Zhao2023LearningFB,
title={Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware},
@@ -56,13 +57,13 @@
journal={RSS},
year={2023},
volume={abs/2304.13705},
- url={https://arxiv.org/abs/2304.13705}
+ url={https://huggingface.co/papers/2304.13705}
}""").lstrip(),
}
PUSHT_INFO = {
"license": "mit",
"url": "https://diffusion-policy.cs.columbia.edu/",
- "paper": "https://arxiv.org/abs/2303.04137v5",
+ "paper": "https://huggingface.co/papers/2303.04137",
"citation_bibtex": dedent(r"""
@article{chi2024diffusionpolicy,
author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
@@ -74,7 +75,7 @@
XARM_INFO = {
"license": "mit",
"url": "https://www.nicklashansen.com/td-mpc/",
- "paper": "https://arxiv.org/abs/2203.04955",
+ "paper": "https://huggingface.co/papers/2203.04955",
"citation_bibtex": dedent(r"""
@inproceedings{Hansen2022tdmpc,
title={Temporal Difference Learning for Model Predictive Control},
@@ -243,7 +244,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/BUDS-website/",
- "paper": "https://arxiv.org/abs/2109.13841",
+ "paper": "https://huggingface.co/papers/2109.13841",
"citation_bibtex": dedent(r"""
@article{zhu2022bottom,
title={Bottom-Up Skill Discovery From Unsegmented Demonstrations for Long-Horizon Robot Manipulation},
@@ -260,7 +261,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/sailor/",
- "paper": "https://arxiv.org/abs/2210.11435",
+ "paper": "https://huggingface.co/papers/2210.11435",
"citation_bibtex": dedent(r"""
@inproceedings{nasiriany2022sailor,
title={Learning and Retrieval from Prior Data for Skill-based Imitation Learning},
@@ -273,7 +274,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/sirius/",
- "paper": "https://arxiv.org/abs/2211.08416",
+ "paper": "https://huggingface.co/papers/2211.08416",
"citation_bibtex": dedent(r"""
@inproceedings{liu2022robot,
title = {Robot Learning on the Job: Human-in-the-Loop Autonomy and Learning During Deployment},
@@ -297,14 +298,14 @@
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://sites.google.com/view/cablerouting/home",
- "paper": "https://arxiv.org/abs/2307.08927",
+ "paper": "https://huggingface.co/papers/2307.08927",
"citation_bibtex": dedent(r"""
@article{luo2023multistage,
author = {Jianlan Luo and Charles Xu and Xinyang Geng and Gilbert Feng and Kuan Fang and Liam Tan and Stefan Schaal and Sergey Levine},
title = {Multi-Stage Cable Routing through Hierarchical Imitation Learning},
journal = {arXiv pre-print},
year = {2023},
- url = {https://arxiv.org/abs/2307.08927},
+ url = {https://huggingface.co/papers/2307.08927},
}""").lstrip(),
},
"berkeley_fanuc_manipulation": {
@@ -321,7 +322,7 @@
"berkeley_gnm_cory_hall": {
"tasks_col": "language_instruction",
"license": "mit",
- "paper": "https://arxiv.org/abs/1709.10489",
+ "paper": "https://huggingface.co/papers/1709.10489",
"citation_bibtex": dedent(r"""
@inproceedings{kahn2018self,
title={Self-supervised deep reinforcement learning with generalized computation graphs for robot navigation},
@@ -336,7 +337,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/recon-robot",
- "paper": "https://arxiv.org/abs/2104.05859",
+ "paper": "https://huggingface.co/papers/2104.05859",
"citation_bibtex": dedent(r"""
@inproceedings{shah2021rapid,
title={Rapid Exploration for Open-World Navigation with Latent Goal Models},
@@ -350,7 +351,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/SACSoN-review",
- "paper": "https://arxiv.org/abs/2306.01874",
+ "paper": "https://huggingface.co/papers/2306.01874",
"citation_bibtex": dedent(r"""
@article{hirose2023sacson,
title={SACSoN: Scalable Autonomous Data Collection for Social Navigation},
@@ -362,7 +363,7 @@
"berkeley_mvp": {
"tasks_col": "language_instruction",
"license": "mit",
- "paper": "https://arxiv.org/abs/2203.06173",
+ "paper": "https://huggingface.co/papers/2203.06173",
"citation_bibtex": dedent(r"""
@InProceedings{Radosavovic2022,
title = {Real-World Robot Learning with Masked Visual Pre-training},
@@ -374,7 +375,7 @@
"berkeley_rpt": {
"tasks_col": "language_instruction",
"license": "mit",
- "paper": "https://arxiv.org/abs/2306.10007",
+ "paper": "https://huggingface.co/papers/2306.10007",
"citation_bibtex": dedent(r"""
@article{Radosavovic2023,
title={Robot Learning with Sensorimotor Pre-training},
@@ -387,7 +388,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://human-world-model.github.io/",
- "paper": "https://arxiv.org/abs/2308.10901",
+ "paper": "https://huggingface.co/papers/2308.10901",
"citation_bibtex": dedent(r"""
@inproceedings{mendonca2023structured,
title={Structured World Models from Human Videos},
@@ -400,7 +401,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://play-fusion.github.io/",
- "paper": "https://arxiv.org/abs/2312.04549",
+ "paper": "https://huggingface.co/papers/2312.04549",
"citation_bibtex": dedent(r"""
@inproceedings{chen2023playfusion,
title={PlayFusion: Skill Acquisition via Diffusion from Language-Annotated Play},
@@ -413,7 +414,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://robo-affordances.github.io/",
- "paper": "https://arxiv.org/abs/2304.08488",
+ "paper": "https://huggingface.co/papers/2304.08488",
"citation_bibtex": dedent(r"""
@inproceedings{bahl2023affordances,
title={Affordances from Human Videos as a Versatile Representation for Robotics},
@@ -432,7 +433,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://diffusion-policy.cs.columbia.edu/",
- "paper": "https://arxiv.org/abs/2303.04137v5",
+ "paper": "https://huggingface.co/papers/2303.04137",
"citation_bibtex": dedent(r"""
@inproceedings{chi2023diffusionpolicy,
title={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
@@ -504,7 +505,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://droid-dataset.github.io/",
- "paper": "https://arxiv.org/abs/2403.12945",
+ "paper": "https://huggingface.co/papers/2403.12945",
"citation_bibtex": dedent(r"""
@article{khazatsky2024droid,
title = {DROID: A Large-Scale In-The-Wild Robot Manipulation Dataset},
@@ -516,7 +517,7 @@
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://functional-manipulation-benchmark.github.io/",
- "paper": "https://arxiv.org/abs/2401.08553",
+ "paper": "https://huggingface.co/papers/2401.08553",
"citation_bibtex": dedent(r"""
@article{luo2024fmb,
title={FMB: a Functional Manipulation Benchmark for Generalizable Robotic Learning},
@@ -529,7 +530,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://openreview.net/forum?id=WuBv9-IGDUA",
- "paper": "https://arxiv.org/abs/2401.14502",
+ "paper": "https://huggingface.co/papers/2401.14502",
"citation_bibtex": dedent(r"""
@inproceedings{saxena2023multiresolution,
title={Multi-Resolution Sensing for Real-Time Control with Vision-Language Models},
@@ -574,7 +575,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://jyopari.github.io/VINN/",
- "paper": "https://arxiv.org/abs/2112.01511",
+ "paper": "https://huggingface.co/papers/2112.01511",
"citation_bibtex": dedent(r"""
@misc{pari2021surprising,
title={The Surprising Effectiveness of Representation Learning for Visual Imitation},
@@ -589,7 +590,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://play-to-policy.github.io/",
- "paper": "https://arxiv.org/abs/2210.10047",
+ "paper": "https://huggingface.co/papers/2210.10047",
"citation_bibtex": dedent(r"""
@article{cui2022play,
title = {From Play to Policy: Conditional Behavior Generation from Uncurated Robot Data},
@@ -602,7 +603,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://rot-robot.github.io/",
- "paper": "https://arxiv.org/abs/2206.15469",
+ "paper": "https://huggingface.co/papers/2206.15469",
"citation_bibtex": dedent(r"""
@inproceedings{haldar2023watch,
title={Watch and match: Supercharging imitation with regularized optimal transport},
@@ -632,7 +633,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/hydra-il-2023",
- "paper": "https://arxiv.org/abs/2306.17237",
+ "paper": "https://huggingface.co/papers/2306.17237",
"citation_bibtex": dedent(r"""
@article{belkhale2023hydra,
title={HYDRA: Hybrid Robot Actions for Imitation Learning},
@@ -645,21 +646,21 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/visionandtouch",
- "paper": "https://arxiv.org/abs/1810.10191",
+ "paper": "https://huggingface.co/papers/1810.10191",
"citation_bibtex": dedent(r"""
@inproceedings{lee2019icra,
title={Making sense of vision and touch: Self-supervised learning of multimodal representations for contact-rich tasks},
author={Lee, Michelle A and Zhu, Yuke and Srinivasan, Krishnan and Shah, Parth and Savarese, Silvio and Fei-Fei, Li and Garg, Animesh and Bohg, Jeannette},
booktitle={2019 IEEE International Conference on Robotics and Automation (ICRA)},
year={2019},
- url={https://arxiv.org/abs/1810.10191}
+ url={https://huggingface.co/papers/1810.10191}
}""").lstrip(),
},
"stanford_robocook": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://hshi74.github.io/robocook/",
- "paper": "https://arxiv.org/abs/2306.14447",
+ "paper": "https://huggingface.co/papers/2306.14447",
"citation_bibtex": dedent(r"""
@article{shi2023robocook,
title={RoboCook: Long-Horizon Elasto-Plastic Object Manipulation with Diverse Tools},
@@ -672,7 +673,7 @@
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://www.kaggle.com/datasets/oiermees/taco-robot",
- "paper": "https://arxiv.org/abs/2209.08959, https://arxiv.org/abs/2210.01911",
+ "paper": "https://huggingface.co/papers/2209.08959, https://huggingface.co/papers/2210.01911",
"citation_bibtex": dedent(r"""
@inproceedings{rosete2022tacorl,
author = {Erick Rosete-Beas and Oier Mees and Gabriel Kalweit and Joschka Boedecker and Wolfram Burgard},
@@ -692,7 +693,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "URL",
- "paper": "https://arxiv.org/abs/2107.05842",
+ "paper": "https://huggingface.co/papers/2107.05842",
"citation_bibtex": dedent(r"""
@Article{Osa22,
author = {Takayuki Osa},
@@ -708,7 +709,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://toto-benchmark.org/",
- "paper": "https://arxiv.org/abs/2306.00942",
+ "paper": "https://huggingface.co/papers/2306.00942",
"citation_bibtex": dedent(r"""
@inproceedings{zhou2023train,
author={Zhou, Gaoyue and Dean, Victoria and Srirama, Mohan Kumar and Rajeswaran, Aravind and Pari, Jyothish and Hatch, Kyle and Jain, Aryan and Yu, Tianhe and Abbeel, Pieter and Pinto, Lerrel and Finn, Chelsea and Gupta, Abhinav},
@@ -732,7 +733,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://owmcorl.github.io/#",
- "paper": "https://arxiv.org/abs/2310.16029",
+ "paper": "https://huggingface.co/papers/2310.16029",
"citation_bibtex": dedent(r"""
@preprint{Feng2023Finetuning,
title={Finetuning Offline World Models in the Real World},
@@ -744,7 +745,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://robopil.github.io/d3fields/",
- "paper": "https://arxiv.org/abs/2309.16118",
+ "paper": "https://huggingface.co/papers/2309.16118",
"citation_bibtex": dedent(r"""
@article{wang2023d3field,
title={D^3Field: Dynamic 3D Descriptor Fields for Generalizable Robotic Manipulation},
@@ -757,7 +758,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://uscresl.github.io/dmfd/",
- "paper": "https://arxiv.org/abs/2207.10148",
+ "paper": "https://huggingface.co/papers/2207.10148",
"citation_bibtex": dedent(r"""
@article{salhotra2022dmfd,
author={Salhotra, Gautam and Liu, I-Chun Arthur and Dominguez-Kuhne, Marcus and Sukhatme, Gaurav S.},
@@ -774,7 +775,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/MUTEX/",
- "paper": "https://arxiv.org/abs/2309.14320",
+ "paper": "https://huggingface.co/papers/2309.14320",
"citation_bibtex": dedent(r"""
@inproceedings{shah2023mutex,
title={{MUTEX}: Learning Unified Policies from Multimodal Task Specifications},
@@ -810,7 +811,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://saytap.github.io/",
- "paper": "https://arxiv.org/abs/2306.07580",
+ "paper": "https://huggingface.co/papers/2306.07580",
"citation_bibtex": dedent(r"""
@article{saytap2023,
author = {Yujin Tang and Wenhao Yu and Jie Tan and Heiga Zen and Aleksandra Faust and
@@ -846,7 +847,7 @@
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/VIOLA/",
- "paper": "https://arxiv.org/abs/2210.11339",
+ "paper": "https://huggingface.co/papers/2210.11339",
"citation_bibtex": dedent(r"""
@article{zhu2022viola,
title={VIOLA: Imitation Learning for Vision-Based Manipulation with Object Proposal Priors},
@@ -856,6 +857,7 @@
}""").lstrip(),
},
}
+# spellchecker:on
def batch_convert():
diff --git a/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py b/src/lerobot/datasets/v2/convert_dataset_v1_to_v2.py
similarity index 90%
rename from lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py
rename to src/lerobot/datasets/v2/convert_dataset_v1_to_v2.py
index bf135043b0..cddfc4c18b 100644
--- a/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py
+++ b/src/lerobot/datasets/v2/convert_dataset_v1_to_v2.py
@@ -17,7 +17,7 @@
"""
This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 1.6 to
2.0. You will be required to provide the 'tasks', which is a short but accurate description in plain English
-for each of the task performed in the dataset. This will allow to easily train models with task-conditionning.
+for each of the tasks performed in the dataset. This will make it easy to train models with task-conditioning.
We support 3 different scenarios for these tasks (see instructions below):
1. Single task dataset: all episodes of your dataset have the same single task.
@@ -38,7 +38,7 @@
Examples:
```bash
-python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
+python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \
--repo-id lerobot/aloha_sim_insertion_human_image \
--single-task "Insert the peg into the socket." \
--robot-config lerobot/configs/robot/aloha.yaml \
@@ -46,7 +46,7 @@
```
```bash
-python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
+python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \
--repo-id aliberts/koch_tutorial \
--single-task "Pick the Lego block and drop it in the box on the right." \
--robot-config lerobot/configs/robot/koch.yaml \
@@ -63,7 +63,7 @@
Example:
```bash
- python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
+ python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \
--repo-id lerobot/stanford_kuka_multimodal_dataset \
--tasks-col "language_instruction" \
--local-dir data
@@ -92,7 +92,7 @@
Example:
```bash
-python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
+python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \
--repo-id lerobot/stanford_kuka_multimodal_dataset \
--tasks-col "language_instruction" \
--local-dir data
@@ -119,7 +119,7 @@
from huggingface_hub.errors import EntryNotFoundError, HfHubHTTPError
from safetensors.torch import load_file
-from lerobot.common.datasets.utils import (
+from lerobot.datasets.utils import (
DEFAULT_CHUNK_SIZE,
DEFAULT_PARQUET_PATH,
DEFAULT_VIDEO_PATH,
@@ -130,18 +130,18 @@
create_branch,
create_lerobot_dataset_card,
flatten_dict,
- get_hub_safe_version,
+ get_safe_version,
load_json,
unflatten_dict,
write_json,
write_jsonlines,
)
-from lerobot.common.datasets.video_utils import (
+from lerobot.datasets.video_utils import (
VideoFrame, # noqa: F401
get_image_pixel_channels,
get_video_info,
)
-from lerobot.common.utils.utils import init_hydra_config
+from lerobot.robots import RobotConfig
V16 = "v1.6"
V20 = "v2.0"
@@ -152,19 +152,18 @@
V1_STATS_PATH = "meta_data/stats.safetensors"
-def parse_robot_config(config_path: Path, config_overrides: list[str] | None = None) -> tuple[str, dict]:
- robot_cfg = init_hydra_config(config_path, config_overrides)
- if robot_cfg["robot_type"] in ["aloha", "koch"]:
+def parse_robot_config(robot_cfg: RobotConfig) -> tuple[str, dict]:
+ if robot_cfg.type in ["aloha", "koch"]:
state_names = [
- f"{arm}_{motor}" if len(robot_cfg["follower_arms"]) > 1 else motor
- for arm in robot_cfg["follower_arms"]
- for motor in robot_cfg["follower_arms"][arm]["motors"]
+ f"{arm}_{motor}" if len(robot_cfg.follower_arms) > 1 else motor
+ for arm in robot_cfg.follower_arms
+ for motor in robot_cfg.follower_arms[arm].motors
]
action_names = [
# f"{arm}_{motor}" for arm in ["left", "right"] for motor in robot_cfg["leader_arms"][arm]["motors"]
- f"{arm}_{motor}" if len(robot_cfg["leader_arms"]) > 1 else motor
- for arm in robot_cfg["leader_arms"]
- for motor in robot_cfg["leader_arms"][arm]["motors"]
+ f"{arm}_{motor}" if len(robot_cfg.leader_arms) > 1 else motor
+ for arm in robot_cfg.leader_arms
+ for motor in robot_cfg.leader_arms[arm].motors
]
# elif robot_cfg["robot_type"] == "stretch3": TODO
else:
@@ -173,7 +172,7 @@ def parse_robot_config(config_path: Path, config_overrides: list[str] | None = N
)
return {
- "robot_type": robot_cfg["robot_type"],
+ "robot_type": robot_cfg.type,
"names": {
"observation.state": state_names,
"observation.effort": state_names,
@@ -203,7 +202,10 @@ def convert_stats_to_json(v1_dir: Path, v2_dir: Path) -> None:
torch.testing.assert_close(stats_json[key], stats[key])
-def get_features_from_hf_dataset(dataset: Dataset, robot_config: dict | None = None) -> dict[str, list]:
+def get_features_from_hf_dataset(
+ dataset: Dataset, robot_config: RobotConfig | None = None
+) -> dict[str, list]:
+    robot_config = parse_robot_config(robot_config) if robot_config is not None else None
features = {}
for key, ft in dataset.features.items():
if isinstance(ft, datasets.Value):
@@ -224,11 +226,11 @@ def get_features_from_hf_dataset(dataset: Dataset, robot_config: dict | None = N
image = dataset[0][key] # Assuming first row
channels = get_image_pixel_channels(image)
shape = (image.height, image.width, channels)
- names = ["height", "width", "channel"]
+ names = ["height", "width", "channels"]
elif ft._type == "VideoFrame":
dtype = "video"
shape = None # Add shape later
- names = ["height", "width", "channel"]
+ names = ["height", "width", "channels"]
features[key] = {
"dtype": dtype,
@@ -436,11 +438,11 @@ def convert_dataset(
single_task: str | None = None,
tasks_path: Path | None = None,
tasks_col: Path | None = None,
- robot_config: dict | None = None,
+ robot_config: RobotConfig | None = None,
test_branch: str | None = None,
**card_kwargs,
):
- v1 = get_hub_safe_version(repo_id, V16)
+ v1 = get_safe_version(repo_id, V16)
v1x_dir = local_dir / V16 / repo_id
v20_dir = local_dir / V20 / repo_id
v1x_dir.mkdir(parents=True, exist_ok=True)
@@ -478,7 +480,7 @@ def convert_dataset(
# Tasks
if single_task:
- tasks_by_episodes = {ep_idx: single_task for ep_idx in episode_indices}
+ tasks_by_episodes = dict.fromkeys(episode_indices, single_task)
dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes)
tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()}
elif tasks_path:
@@ -532,7 +534,7 @@ def convert_dataset(
episode_lengths = split_parquet_by_episodes(dataset, total_episodes, total_chunks, v20_dir)
if robot_config is not None:
- robot_type = robot_config["robot_type"]
+ robot_type = robot_config.type
repo_tags = [robot_type]
else:
robot_type = "unknown"
@@ -595,6 +597,30 @@ def convert_dataset(
create_branch(repo_id=repo_id, branch=V20, repo_type="dataset")
+def make_robot_config(robot_type: str, **kwargs) -> RobotConfig:
+ if robot_type == "aloha":
+ raise NotImplementedError # TODO
+
+ elif robot_type == "koch_follower":
+ from lerobot.robots.koch_follower import KochFollowerConfig
+
+ return KochFollowerConfig(**kwargs)
+ elif robot_type == "so100_follower":
+ from lerobot.robots.so100_follower import SO100FollowerConfig
+
+ return SO100FollowerConfig(**kwargs)
+ elif robot_type == "stretch":
+ from lerobot.robots.stretch3 import Stretch3RobotConfig
+
+ return Stretch3RobotConfig(**kwargs)
+ elif robot_type == "lekiwi":
+ from lerobot.robots.lekiwi import LeKiwiConfig
+
+ return LeKiwiConfig(**kwargs)
+ else:
+ raise ValueError(f"Robot type '{robot_type}' is not available.")
+
+
def main():
parser = argparse.ArgumentParser()
task_args = parser.add_mutually_exclusive_group(required=True)
@@ -621,16 +647,10 @@ def main():
help="The path to a .json file containing one language instruction for each episode_index",
)
parser.add_argument(
- "--robot-config",
- type=Path,
- default=None,
- help="Path to the robot's config yaml the dataset during conversion.",
- )
- parser.add_argument(
- "--robot-overrides",
+ "--robot",
type=str,
- nargs="*",
- help="Any key=value arguments to override the robot config values (use dots for.nested=overrides)",
+ default=None,
+ help="Robot config used for the dataset during conversion (e.g. 'koch', 'aloha', 'so100', etc.)",
)
parser.add_argument(
"--local-dir",
@@ -655,8 +675,10 @@ def main():
if not args.local_dir:
args.local_dir = Path("/tmp/lerobot_dataset_v2")
- robot_config = parse_robot_config(args.robot_config, args.robot_overrides) if args.robot_config else None
- del args.robot_config, args.robot_overrides
+    robot_config = make_robot_config(args.robot) if args.robot is not None else None
+
+ del args.robot
convert_dataset(**vars(args), robot_config=robot_config)
diff --git a/src/lerobot/datasets/v21/_remove_language_instruction.py b/src/lerobot/datasets/v21/_remove_language_instruction.py
new file mode 100644
index 0000000000..1f1cb18553
--- /dev/null
+++ b/src/lerobot/datasets/v21/_remove_language_instruction.py
@@ -0,0 +1,87 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import traceback
+from pathlib import Path
+
+from datasets import get_dataset_config_info
+from huggingface_hub import HfApi
+
+from lerobot import available_datasets
+from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
+from lerobot.datasets.utils import INFO_PATH, write_info
+from lerobot.datasets.v21.convert_dataset_v20_to_v21 import V20, SuppressWarnings
+
+LOCAL_DIR = Path("data/")
+
+hub_api = HfApi()
+
+
+def fix_dataset(repo_id: str) -> str:
+ if not hub_api.revision_exists(repo_id, V20, repo_type="dataset"):
+ return f"{repo_id}: skipped (not in {V20})."
+
+ dataset_info = get_dataset_config_info(repo_id, "default")
+ with SuppressWarnings():
+ lerobot_metadata = LeRobotDatasetMetadata(repo_id, revision=V20, force_cache_sync=True)
+
+ meta_features = {key for key, ft in lerobot_metadata.features.items() if ft["dtype"] != "video"}
+ parquet_features = set(dataset_info.features)
+
+ diff_parquet_meta = parquet_features - meta_features
+ diff_meta_parquet = meta_features - parquet_features
+
+ if diff_parquet_meta:
+ raise ValueError(f"In parquet not in info.json: {parquet_features - meta_features}")
+
+ if not diff_meta_parquet:
+ return f"{repo_id}: skipped (no diff)"
+
+ if diff_meta_parquet:
+ logging.warning(f"In info.json not in parquet: {meta_features - parquet_features}")
+ assert diff_meta_parquet == {"language_instruction"}
+ lerobot_metadata.features.pop("language_instruction")
+ write_info(lerobot_metadata.info, lerobot_metadata.root)
+ commit_info = hub_api.upload_file(
+ path_or_fileobj=lerobot_metadata.root / INFO_PATH,
+ path_in_repo=INFO_PATH,
+ repo_id=repo_id,
+ repo_type="dataset",
+ revision=V20,
+ commit_message="Remove 'language_instruction'",
+ create_pr=True,
+ )
+ return f"{repo_id}: success - PR: {commit_info.pr_url}"
+
+
+def batch_fix():
+ status = {}
+ LOCAL_DIR.mkdir(parents=True, exist_ok=True)
+ logfile = LOCAL_DIR / "fix_features_v20.txt"
+ for num, repo_id in enumerate(available_datasets):
+ print(f"\nConverting {repo_id} ({num}/{len(available_datasets)})")
+ print("---------------------------------------------------------")
+ try:
+ status = fix_dataset(repo_id)
+ except Exception:
+ status = f"{repo_id}: failed\n {traceback.format_exc()}"
+
+ logging.info(status)
+ with open(logfile, "a") as file:
+ file.write(status + "\n")
+
+
+if __name__ == "__main__":
+ batch_fix()
diff --git a/src/lerobot/datasets/v21/batch_convert_dataset_v20_to_v21.py b/src/lerobot/datasets/v21/batch_convert_dataset_v20_to_v21.py
new file mode 100644
index 0000000000..b4f1c36c4d
--- /dev/null
+++ b/src/lerobot/datasets/v21/batch_convert_dataset_v20_to_v21.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This script is for internal use to convert all datasets under the 'lerobot' hub user account to v2.1.
+"""
+
+import traceback
+from pathlib import Path
+
+from huggingface_hub import HfApi
+
+from lerobot import available_datasets
+from lerobot.datasets.v21.convert_dataset_v20_to_v21 import V21, convert_dataset
+
+LOCAL_DIR = Path("data/")
+
+
+def batch_convert():
+ status = {}
+ LOCAL_DIR.mkdir(parents=True, exist_ok=True)
+ logfile = LOCAL_DIR / "conversion_log_v21.txt"
+ hub_api = HfApi()
+ for num, repo_id in enumerate(available_datasets):
+ print(f"\nConverting {repo_id} ({num}/{len(available_datasets)})")
+ print("---------------------------------------------------------")
+ try:
+ if hub_api.revision_exists(repo_id, V21, repo_type="dataset"):
+ status = f"{repo_id}: success (already in {V21})."
+ else:
+ convert_dataset(repo_id)
+ status = f"{repo_id}: success."
+ except Exception:
+ status = f"{repo_id}: failed\n {traceback.format_exc()}"
+
+ with open(logfile, "a") as file:
+ file.write(status + "\n")
+
+
+if __name__ == "__main__":
+ batch_convert()
diff --git a/src/lerobot/datasets/v21/convert_dataset_v20_to_v21.py b/src/lerobot/datasets/v21/convert_dataset_v20_to_v21.py
new file mode 100644
index 0000000000..4ebc1086a3
--- /dev/null
+++ b/src/lerobot/datasets/v21/convert_dataset_v20_to_v21.py
@@ -0,0 +1,114 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.0 to
+2.1. It will:
+
+- Generate per-episode stats and write them to `episodes_stats.jsonl`.
+- Check consistency between these new stats and the old ones.
+- Remove the deprecated `stats.json`.
+- Update `codebase_version` in `info.json`.
+- Push this new version to the hub on the 'main' branch and tag it with "v2.1".
+
+Usage:
+
+```bash
+python -m lerobot.datasets.v21.convert_dataset_v20_to_v21 \
+ --repo-id=aliberts/koch_tutorial
+```
+
+"""
+
+import argparse
+import logging
+
+from huggingface_hub import HfApi
+
+from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
+from lerobot.datasets.utils import EPISODES_STATS_PATH, STATS_PATH, load_stats, write_info
+from lerobot.datasets.v21.convert_stats import check_aggregate_stats, convert_stats
+
+V20 = "v2.0"
+V21 = "v2.1"
+
+
+class SuppressWarnings:
+ def __enter__(self):
+ self.previous_level = logging.getLogger().getEffectiveLevel()
+ logging.getLogger().setLevel(logging.ERROR)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ logging.getLogger().setLevel(self.previous_level)
+
+
+def convert_dataset(
+ repo_id: str,
+ branch: str | None = None,
+ num_workers: int = 4,
+):
+ with SuppressWarnings():
+ dataset = LeRobotDataset(repo_id, revision=V20, force_cache_sync=True)
+
+ if (dataset.root / EPISODES_STATS_PATH).is_file():
+ (dataset.root / EPISODES_STATS_PATH).unlink()
+
+ convert_stats(dataset, num_workers=num_workers)
+ ref_stats = load_stats(dataset.root)
+ check_aggregate_stats(dataset, ref_stats)
+
+ dataset.meta.info["codebase_version"] = CODEBASE_VERSION
+ write_info(dataset.meta.info, dataset.root)
+
+ dataset.push_to_hub(branch=branch, tag_version=False, allow_patterns="meta/")
+
+ # delete old stats.json file
+    if (dataset.root / STATS_PATH).is_file():
+ (dataset.root / STATS_PATH).unlink()
+
+ hub_api = HfApi()
+ if hub_api.file_exists(
+ repo_id=dataset.repo_id, filename=STATS_PATH, revision=branch, repo_type="dataset"
+ ):
+ hub_api.delete_file(
+ path_in_repo=STATS_PATH, repo_id=dataset.repo_id, revision=branch, repo_type="dataset"
+ )
+
+ hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--repo-id",
+ type=str,
+ required=True,
+ help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset "
+ "(e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
+ )
+ parser.add_argument(
+ "--branch",
+ type=str,
+ default=None,
+ help="Repo branch to push your dataset. Defaults to the main branch.",
+ )
+ parser.add_argument(
+ "--num-workers",
+ type=int,
+ default=4,
+ help="Number of workers for parallelizing stats compute. Defaults to 4.",
+ )
+
+ args = parser.parse_args()
+ convert_dataset(**vars(args))
diff --git a/src/lerobot/datasets/v21/convert_stats.py b/src/lerobot/datasets/v21/convert_stats.py
new file mode 100644
index 0000000000..462781c158
--- /dev/null
+++ b/src/lerobot/datasets/v21/convert_stats.py
@@ -0,0 +1,99 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+import numpy as np
+from tqdm import tqdm
+
+from lerobot.datasets.compute_stats import aggregate_stats, get_feature_stats, sample_indices
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.utils import write_episode_stats
+
+
+def sample_episode_video_frames(dataset: LeRobotDataset, episode_index: int, ft_key: str) -> np.ndarray:
+ ep_len = dataset.meta.episodes[episode_index]["length"]
+ sampled_indices = sample_indices(ep_len)
+ query_timestamps = dataset._get_query_timestamps(0.0, {ft_key: sampled_indices})
+ video_frames = dataset._query_videos(query_timestamps, episode_index)
+ return video_frames[ft_key].numpy()
+
+
+def convert_episode_stats(dataset: LeRobotDataset, ep_idx: int):
+ ep_start_idx = dataset.episode_data_index["from"][ep_idx]
+ ep_end_idx = dataset.episode_data_index["to"][ep_idx]
+ ep_data = dataset.hf_dataset.select(range(ep_start_idx, ep_end_idx))
+
+ ep_stats = {}
+ for key, ft in dataset.features.items():
+ if ft["dtype"] == "video":
+ # We sample only for videos
+ ep_ft_data = sample_episode_video_frames(dataset, ep_idx, key)
+ else:
+ ep_ft_data = np.array(ep_data[key])
+
+ axes_to_reduce = (0, 2, 3) if ft["dtype"] in ["image", "video"] else 0
+ keepdims = True if ft["dtype"] in ["image", "video"] else ep_ft_data.ndim == 1
+ ep_stats[key] = get_feature_stats(ep_ft_data, axis=axes_to_reduce, keepdims=keepdims)
+
+ if ft["dtype"] in ["image", "video"]: # remove batch dim
+ ep_stats[key] = {
+ k: v if k == "count" else np.squeeze(v, axis=0) for k, v in ep_stats[key].items()
+ }
+
+ dataset.meta.episodes_stats[ep_idx] = ep_stats
+
+
+def convert_stats(dataset: LeRobotDataset, num_workers: int = 0):
+ assert dataset.episodes is None
+ print("Computing episodes stats")
+ total_episodes = dataset.meta.total_episodes
+ if num_workers > 0:
+ with ThreadPoolExecutor(max_workers=num_workers) as executor:
+ futures = {
+ executor.submit(convert_episode_stats, dataset, ep_idx): ep_idx
+ for ep_idx in range(total_episodes)
+ }
+ for future in tqdm(as_completed(futures), total=total_episodes):
+ future.result()
+ else:
+ for ep_idx in tqdm(range(total_episodes)):
+ convert_episode_stats(dataset, ep_idx)
+
+ for ep_idx in tqdm(range(total_episodes)):
+ write_episode_stats(ep_idx, dataset.meta.episodes_stats[ep_idx], dataset.root)
+
+
+def check_aggregate_stats(
+ dataset: LeRobotDataset,
+ reference_stats: dict[str, dict[str, np.ndarray]],
+ video_rtol_atol: tuple[float, float] = (1e-2, 1e-2),
+ default_rtol_atol: tuple[float, float] = (5e-6, 6e-5),
+):
+ """Verifies that the aggregated stats from episodes_stats are close to reference stats."""
+ agg_stats = aggregate_stats(list(dataset.meta.episodes_stats.values()))
+ for key, ft in dataset.features.items():
+ # These values might need some fine-tuning
+ if ft["dtype"] == "video":
+ # to account for image sub-sampling
+ rtol, atol = video_rtol_atol
+ else:
+ rtol, atol = default_rtol_atol
+
+ for stat, val in agg_stats[key].items():
+ if key in reference_stats and stat in reference_stats[key]:
+ err_msg = f"feature='{key}' stats='{stat}'"
+ np.testing.assert_allclose(
+ val, reference_stats[key][stat], rtol=rtol, atol=atol, err_msg=err_msg
+ )
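+
+
+# A minimal usage sketch (assuming `dataset` is a LeRobotDataset loaded without an episode subset
+# and `ref_stats` holds the reference stats read from the old `meta/stats.json`), mirroring how the
+# conversion script above calls these helpers:
+#
+#   convert_stats(dataset, num_workers=4)
+#   check_aggregate_stats(dataset, ref_stats)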
diff --git a/src/lerobot/datasets/video_utils.py b/src/lerobot/datasets/video_utils.py
new file mode 100644
index 0000000000..b05edf6bde
--- /dev/null
+++ b/src/lerobot/datasets/video_utils.py
@@ -0,0 +1,517 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import glob
+import importlib
+import logging
+import shutil
+import warnings
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, ClassVar
+
+import av
+import pyarrow as pa
+import torch
+import torchvision
+from datasets.features.features import register_feature
+from PIL import Image
+
+
+def get_safe_default_codec():
+ if importlib.util.find_spec("torchcodec"):
+ return "torchcodec"
+ else:
+ logging.warning(
+ "'torchcodec' is not available in your platform, falling back to 'pyav' as a default decoder"
+ )
+ return "pyav"
+
+
+def decode_video_frames(
+ video_path: Path | str,
+ timestamps: list[float],
+ tolerance_s: float,
+ backend: str | None = None,
+) -> torch.Tensor:
+ """
+ Decodes video frames using the specified backend.
+
+ Args:
+ video_path (Path): Path to the video file.
+ timestamps (list[float]): List of timestamps to extract frames.
+ tolerance_s (float): Allowed deviation in seconds for frame retrieval.
+ backend (str, optional): Backend to use for decoding. Defaults to "torchcodec" when available on the platform; otherwise, defaults to "pyav".
+
+ Returns:
+ torch.Tensor: Decoded frames.
+
+ Currently supports torchcodec on cpu and pyav.
+ """
+ if backend is None:
+ backend = get_safe_default_codec()
+ if backend == "torchcodec":
+ return decode_video_frames_torchcodec(video_path, timestamps, tolerance_s)
+ elif backend in ["pyav", "video_reader"]:
+ return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend)
+ else:
+ raise ValueError(f"Unsupported video backend: {backend}")
+
+
+def decode_video_frames_torchvision(
+ video_path: Path | str,
+ timestamps: list[float],
+ tolerance_s: float,
+ backend: str = "pyav",
+ log_loaded_timestamps: bool = False,
+) -> torch.Tensor:
+ """Loads frames associated to the requested timestamps of a video
+
+ The backend can be either "pyav" (default) or "video_reader".
+ "video_reader" requires installing torchvision from source, see:
+ https://github.com/pytorch/vision/blob/main/torchvision/csrc/io/decoder/gpu/README.rst
+ (note that you need to compile against ffmpeg<4.3)
+
+ While both use cpu, "video_reader" is supposedly faster than "pyav" but requires additional setup.
+ For more info on video decoding, see `benchmark/video/README.md`
+
+ See torchvision doc for more info on these two backends:
+ https://pytorch.org/vision/0.18/index.html?highlight=backend#torchvision.set_video_backend
+
+ Note: Video benefits from inter-frame compression. Instead of storing every frame individually,
+ the encoder stores a reference frame (or a key frame) and subsequent frames as differences relative to
+ that key frame. As a consequence, to access a requested frame, we need to load the preceding key frame,
+ and all subsequent frames until reaching the requested frame. The number of key frames in a video
+ can be adjusted during encoding to take into account decoding time and video size in bytes.
+ """
+ video_path = str(video_path)
+
+ # set backend
+ keyframes_only = False
+ torchvision.set_video_backend(backend)
+ if backend == "pyav":
+ keyframes_only = True # pyav doesn't support accurate seek
+
+ # set a video stream reader
+ # TODO(rcadene): also load audio stream at the same time
+ reader = torchvision.io.VideoReader(video_path, "video")
+
+ # set the first and last requested timestamps
+ # Note: previous timestamps are usually loaded, since we need to access the previous key frame
+ first_ts = min(timestamps)
+ last_ts = max(timestamps)
+
+ # access closest key frame of the first requested frame
+ # Note: closest key frame timestamp is usually smaller than `first_ts` (e.g. key frame can be the first frame of the video)
+ # for details on what `seek` is doing see: https://pyav.basswood-io.com/docs/stable/api/container.html?highlight=inputcontainer#av.container.InputContainer.seek
+ reader.seek(first_ts, keyframes_only=keyframes_only)
+
+ # load all frames until last requested frame
+ loaded_frames = []
+ loaded_ts = []
+ for frame in reader:
+ current_ts = frame["pts"]
+ if log_loaded_timestamps:
+ logging.info(f"frame loaded at timestamp={current_ts:.4f}")
+ loaded_frames.append(frame["data"])
+ loaded_ts.append(current_ts)
+ if current_ts >= last_ts:
+ break
+
+ if backend == "pyav":
+ reader.container.close()
+
+ reader = None
+
+ query_ts = torch.tensor(timestamps)
+ loaded_ts = torch.tensor(loaded_ts)
+
+ # compute distances between each query timestamp and timestamps of all loaded frames
+ dist = torch.cdist(query_ts[:, None], loaded_ts[:, None], p=1)
+ min_, argmin_ = dist.min(1)
+
+ is_within_tol = min_ < tolerance_s
+ assert is_within_tol.all(), (
+ f"One or several query timestamps unexpectedly violate the tolerance ({min_[~is_within_tol]} > {tolerance_s=})."
+ " It means that the closest frame that can be loaded from the video is too far away in time."
+ " This might be due to synchronization issues with timestamps during data collection."
+ " To be safe, we advise ignoring this item during training."
+ f"\nqueried timestamps: {query_ts}"
+ f"\nloaded timestamps: {loaded_ts}"
+ f"\nvideo: {video_path}"
+ f"\nbackend: {backend}"
+ )
+
+ # get closest frames to the query timestamps
+ closest_frames = torch.stack([loaded_frames[idx] for idx in argmin_])
+ closest_ts = loaded_ts[argmin_]
+
+ if log_loaded_timestamps:
+ logging.info(f"{closest_ts=}")
+
+ # convert to the pytorch format which is float32 in [0,1] range (and channel first)
+ closest_frames = closest_frames.type(torch.float32) / 255
+
+ assert len(timestamps) == len(closest_frames)
+ return closest_frames
+
+
+def decode_video_frames_torchcodec(
+ video_path: Path | str,
+ timestamps: list[float],
+ tolerance_s: float,
+ device: str = "cpu",
+ log_loaded_timestamps: bool = False,
+) -> torch.Tensor:
+ """Loads frames associated with the requested timestamps of a video using torchcodec.
+
+ Note: Setting device="cuda" outside the main process, e.g. in data loader workers, will lead to CUDA initialization errors.
+
+ Note: Video benefits from inter-frame compression. Instead of storing every frame individually,
+ the encoder stores a reference frame (or a key frame) and subsequent frames as differences relative to
+ that key frame. As a consequence, to access a requested frame, we need to load the preceding key frame,
+ and all subsequent frames until reaching the requested frame. The number of key frames in a video
+ can be adjusted during encoding to take into account decoding time and video size in bytes.
+ """
+
+ if importlib.util.find_spec("torchcodec"):
+ from torchcodec.decoders import VideoDecoder
+ else:
+ raise ImportError("torchcodec is required but not available.")
+
+ # initialize video decoder
+ decoder = VideoDecoder(video_path, device=device, seek_mode="approximate")
+ loaded_frames = []
+ loaded_ts = []
+ # get metadata for frame information
+ metadata = decoder.metadata
+ average_fps = metadata.average_fps
+
+ # convert timestamps to frame indices
+ frame_indices = [round(ts * average_fps) for ts in timestamps]
+
+ # retrieve frames based on indices
+ frames_batch = decoder.get_frames_at(indices=frame_indices)
+
+ for frame, pts in zip(frames_batch.data, frames_batch.pts_seconds, strict=False):
+ loaded_frames.append(frame)
+ loaded_ts.append(pts.item())
+ if log_loaded_timestamps:
+ logging.info(f"Frame loaded at timestamp={pts:.4f}")
+
+ query_ts = torch.tensor(timestamps)
+ loaded_ts = torch.tensor(loaded_ts)
+
+ # compute distances between each query timestamp and loaded timestamps
+ dist = torch.cdist(query_ts[:, None], loaded_ts[:, None], p=1)
+ min_, argmin_ = dist.min(1)
+
+ is_within_tol = min_ < tolerance_s
+ assert is_within_tol.all(), (
+ f"One or several query timestamps unexpectedly violate the tolerance ({min_[~is_within_tol]} > {tolerance_s=})."
+ " It means that the closest frame that can be loaded from the video is too far away in time."
+ " This might be due to synchronization issues with timestamps during data collection."
+ " To be safe, we advise ignoring this item during training."
+ f"\nqueried timestamps: {query_ts}"
+ f"\nloaded timestamps: {loaded_ts}"
+ f"\nvideo: {video_path}"
+ )
+
+ # get closest frames to the query timestamps
+ closest_frames = torch.stack([loaded_frames[idx] for idx in argmin_])
+ closest_ts = loaded_ts[argmin_]
+
+ if log_loaded_timestamps:
+ logging.info(f"{closest_ts=}")
+
+ # convert to float32 in [0,1] range (channel first)
+ closest_frames = closest_frames.type(torch.float32) / 255
+
+ assert len(timestamps) == len(closest_frames)
+ return closest_frames
+
+
+def encode_video_frames(
+ imgs_dir: Path | str,
+ video_path: Path | str,
+ fps: int,
+ vcodec: str = "libsvtav1",
+ pix_fmt: str = "yuv420p",
+ g: int | None = 2,
+ crf: int | None = 30,
+ fast_decode: int = 0,
+ log_level: int | None = av.logging.ERROR,
+ overwrite: bool = False,
+) -> None:
+ """More info on ffmpeg arguments tuning on `benchmark/video/README.md`"""
+ # Check encoder availability
+ if vcodec not in ["h264", "hevc", "libsvtav1"]:
+ raise ValueError(f"Unsupported video codec: {vcodec}. Supported codecs are: h264, hevc, libsvtav1.")
+
+ video_path = Path(video_path)
+ imgs_dir = Path(imgs_dir)
+
+ video_path.parent.mkdir(parents=True, exist_ok=overwrite)
+
+ # Encoders/pixel formats incompatibility check
+ if (vcodec == "libsvtav1" or vcodec == "hevc") and pix_fmt == "yuv444p":
+ logging.warning(
+ f"Incompatible pixel format 'yuv444p' for codec {vcodec}, auto-selecting format 'yuv420p'"
+ )
+ pix_fmt = "yuv420p"
+
+ # Get input frames
+ template = "frame_" + ("[0-9]" * 6) + ".png"
+ input_list = sorted(
+ glob.glob(str(imgs_dir / template)), key=lambda x: int(x.split("_")[-1].split(".")[0])
+ )
+
+ # Define video output frame size (assuming all input frames are the same size)
+ if len(input_list) == 0:
+ raise FileNotFoundError(f"No images found in {imgs_dir}.")
+ dummy_image = Image.open(input_list[0])
+ width, height = dummy_image.size
+
+ # Define video codec options
+ video_options = {}
+
+ if g is not None:
+ video_options["g"] = str(g)
+
+ if crf is not None:
+ video_options["crf"] = str(crf)
+
+ if fast_decode:
+ key = "svtav1-params" if vcodec == "libsvtav1" else "tune"
+ value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode"
+ video_options[key] = value
+
+ # Set logging level
+ if log_level is not None:
+ # "While less efficient, it is generally preferable to modify logging with Python’s logging"
+ logging.getLogger("libav").setLevel(log_level)
+
+ # Create and open output file (overwrite by default)
+ with av.open(str(video_path), "w") as output:
+ output_stream = output.add_stream(vcodec, fps, options=video_options)
+ output_stream.pix_fmt = pix_fmt
+ output_stream.width = width
+ output_stream.height = height
+
+ # Loop through input frames and encode them
+ for input_data in input_list:
+ input_image = Image.open(input_data).convert("RGB")
+ input_frame = av.VideoFrame.from_image(input_image)
+ packet = output_stream.encode(input_frame)
+ if packet:
+ output.mux(packet)
+
+ # Flush the encoder
+ packet = output_stream.encode()
+ if packet:
+ output.mux(packet)
+
+ # Reset logging level
+ if log_level is not None:
+ av.logging.restore_default_callback()
+
+ if not video_path.exists():
+ raise OSError(f"Video encoding did not work. File not found: {video_path}.")
+
+
+@dataclass
+class VideoFrame:
+ # TODO(rcadene, lhoestq): move to Hugging Face `datasets` repo
+ """
+ Provides a type for a dataset containing video frames.
+
+ Example:
+
+ ```python
+ data_dict = [{"image": {"path": "videos/episode_0.mp4", "timestamp": 0.3}}]
+ features = {"image": VideoFrame()}
+ Dataset.from_dict(data_dict, features=Features(features))
+ ```
+ """
+
+ pa_type: ClassVar[Any] = pa.struct({"path": pa.string(), "timestamp": pa.float32()})
+ _type: str = field(default="VideoFrame", init=False, repr=False)
+
+ def __call__(self):
+ return self.pa_type
+
+
+with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ "'register_feature' is experimental and might be subject to breaking changes in the future.",
+ category=UserWarning,
+ )
+ # to make VideoFrame available in HuggingFace `datasets`
+ register_feature(VideoFrame, "VideoFrame")
+
+
+def get_audio_info(video_path: Path | str) -> dict:
+ # Set logging level
+ logging.getLogger("libav").setLevel(av.logging.ERROR)
+
+ # Getting audio stream information
+ audio_info = {}
+ with av.open(str(video_path), "r") as audio_file:
+ try:
+ audio_stream = audio_file.streams.audio[0]
+ except IndexError:
+ # Reset logging level
+ av.logging.restore_default_callback()
+ return {"has_audio": False}
+
+ audio_info["audio.channels"] = audio_stream.channels
+ audio_info["audio.codec"] = audio_stream.codec.canonical_name
+ # In an ideal lossless case: bit depth x sample rate x channels = bit rate.
+ # In an actual compressed case, the bit rate is set according to the compression level: the lower the bit rate, the more compression is applied.
+ audio_info["audio.bit_rate"] = audio_stream.bit_rate
+ audio_info["audio.sample_rate"] = audio_stream.sample_rate # Number of samples per second
+ # In an ideal lossless case: a fixed number of bits per sample.
+ # In an actual compressed case: a variable number of bits per sample (often reduced to match a given depth rate).
+ audio_info["audio.bit_depth"] = audio_stream.format.bits
+ audio_info["audio.channel_layout"] = audio_stream.layout.name
+ audio_info["has_audio"] = True
+
+ # Reset logging level
+ av.logging.restore_default_callback()
+
+ return audio_info
+
+
+def get_video_info(video_path: Path | str) -> dict:
+ # Set logging level
+ logging.getLogger("libav").setLevel(av.logging.ERROR)
+
+ # Getting video stream information
+ video_info = {}
+ with av.open(str(video_path), "r") as video_file:
+ try:
+ video_stream = video_file.streams.video[0]
+ except IndexError:
+ # Reset logging level
+ av.logging.restore_default_callback()
+ return {}
+
+ video_info["video.height"] = video_stream.height
+ video_info["video.width"] = video_stream.width
+ video_info["video.codec"] = video_stream.codec.canonical_name
+ video_info["video.pix_fmt"] = video_stream.pix_fmt
+ video_info["video.is_depth_map"] = False
+
+ # Calculate fps from r_frame_rate
+ video_info["video.fps"] = int(video_stream.base_rate)
+
+ pixel_channels = get_video_pixel_channels(video_stream.pix_fmt)
+ video_info["video.channels"] = pixel_channels
+
+ # Reset logging level
+ av.logging.restore_default_callback()
+
+ # Adding audio stream information
+ video_info.update(**get_audio_info(video_path))
+
+ return video_info
+
+
+def get_video_pixel_channels(pix_fmt: str) -> int:
+ if "gray" in pix_fmt or "depth" in pix_fmt or "monochrome" in pix_fmt:
+ return 1
+ elif "rgba" in pix_fmt or "yuva" in pix_fmt:
+ return 4
+ elif "rgb" in pix_fmt or "yuv" in pix_fmt:
+ return 3
+ else:
+ raise ValueError("Unknown format")
+
+
+def get_image_pixel_channels(image: Image):
+ if image.mode == "L":
+ return 1 # Grayscale
+ elif image.mode == "LA":
+ return 2 # Grayscale + Alpha
+ elif image.mode == "RGB":
+ return 3 # RGB
+ elif image.mode == "RGBA":
+ return 4 # RGBA
+ else:
+ raise ValueError("Unknown format")
+
+
+class VideoEncodingManager:
+ """
+ Context manager that ensures proper video encoding and data cleanup even if exceptions occur.
+
+ This manager handles:
+ - Batch encoding of any remaining episodes when recording is interrupted
+ - Cleaning up temporary image files from interrupted episodes
+ - Removing empty image directories
+
+ Args:
+ dataset: The LeRobotDataset instance
+ """
+
+ def __init__(self, dataset):
+ self.dataset = dataset
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Handle any remaining episodes that haven't been batch encoded
+ if self.dataset.episodes_since_last_encoding > 0:
+ if exc_type is not None:
+ logging.info("Exception occurred. Encoding remaining episodes before exit...")
+ else:
+ logging.info("Recording stopped. Encoding remaining episodes...")
+
+ start_ep = self.dataset.num_episodes - self.dataset.episodes_since_last_encoding
+ end_ep = self.dataset.num_episodes
+ logging.info(
+ f"Encoding remaining {self.dataset.episodes_since_last_encoding} episodes, "
+ f"from episode {start_ep} to {end_ep - 1}"
+ )
+ self.dataset.batch_encode_videos(start_ep, end_ep)
+
+ # Clean up episode images if recording was interrupted
+ if exc_type is not None:
+ interrupted_episode_index = self.dataset.num_episodes
+ for key in self.dataset.meta.video_keys:
+ img_dir = self.dataset._get_image_file_path(
+ episode_index=interrupted_episode_index, image_key=key, frame_index=0
+ ).parent
+ if img_dir.exists():
+ logging.debug(
+ f"Cleaning up interrupted episode images for episode {interrupted_episode_index}, camera {key}"
+ )
+ shutil.rmtree(img_dir)
+
+ # Clean up any remaining images directory if it's empty
+ img_dir = self.dataset.root / "images"
+ # Check for any remaining PNG files
+ png_files = list(img_dir.rglob("*.png"))
+ if len(png_files) == 0:
+ # Only remove the images directory if no PNG files remain
+ if img_dir.exists():
+ shutil.rmtree(img_dir)
+ logging.debug("Cleaned up empty images directory")
+ else:
+ logging.debug(f"Images directory is not empty, containing {len(png_files)} PNG files")
+
+ return False # Don't suppress the original exception
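+
+
+# A minimal usage sketch (assuming `dataset` is a LeRobotDataset being recorded to):
+#
+#   with VideoEncodingManager(dataset):
+#       ...  # record episodes; any remaining episodes are encoded and stray images cleaned up on exit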
diff --git a/src/lerobot/envs/__init__.py b/src/lerobot/envs/__init__.py
new file mode 100644
index 0000000000..4977d11d9f
--- /dev/null
+++ b/src/lerobot/envs/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .configs import AlohaEnv, EnvConfig, PushtEnv, XarmEnv # noqa: F401
diff --git a/src/lerobot/envs/configs.py b/src/lerobot/envs/configs.py
new file mode 100644
index 0000000000..ef381e9e7c
--- /dev/null
+++ b/src/lerobot/envs/configs.py
@@ -0,0 +1,273 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+from dataclasses import dataclass, field
+from typing import Any
+
+import draccus
+
+from lerobot.configs.types import FeatureType, PolicyFeature
+from lerobot.constants import ACTION, OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE
+from lerobot.robots import RobotConfig
+from lerobot.teleoperators.config import TeleoperatorConfig
+
+
+@dataclass
+class EnvConfig(draccus.ChoiceRegistry, abc.ABC):
+ task: str | None = None
+ fps: int = 30
+ features: dict[str, PolicyFeature] = field(default_factory=dict)
+ features_map: dict[str, str] = field(default_factory=dict)
+
+ @property
+ def type(self) -> str:
+ return self.get_choice_name(self.__class__)
+
+ @property
+ @abc.abstractmethod
+ def gym_kwargs(self) -> dict:
+ raise NotImplementedError()
+
+
+@EnvConfig.register_subclass("aloha")
+@dataclass
+class AlohaEnv(EnvConfig):
+ task: str = "AlohaInsertion-v0"
+ fps: int = 50
+ episode_length: int = 400
+ obs_type: str = "pixels_agent_pos"
+ render_mode: str = "rgb_array"
+ features: dict[str, PolicyFeature] = field(
+ default_factory=lambda: {
+ "action": PolicyFeature(type=FeatureType.ACTION, shape=(14,)),
+ }
+ )
+ features_map: dict[str, str] = field(
+ default_factory=lambda: {
+ "action": ACTION,
+ "agent_pos": OBS_STATE,
+ "top": f"{OBS_IMAGE}.top",
+ "pixels/top": f"{OBS_IMAGES}.top",
+ }
+ )
+
+ def __post_init__(self):
+ if self.obs_type == "pixels":
+ self.features["top"] = PolicyFeature(type=FeatureType.VISUAL, shape=(480, 640, 3))
+ elif self.obs_type == "pixels_agent_pos":
+ self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(14,))
+ self.features["pixels/top"] = PolicyFeature(type=FeatureType.VISUAL, shape=(480, 640, 3))
+
+ @property
+ def gym_kwargs(self) -> dict:
+ return {
+ "obs_type": self.obs_type,
+ "render_mode": self.render_mode,
+ "max_episode_steps": self.episode_length,
+ }
+
+
+@EnvConfig.register_subclass("pusht")
+@dataclass
+class PushtEnv(EnvConfig):
+ task: str = "PushT-v0"
+ fps: int = 10
+ episode_length: int = 300
+ obs_type: str = "pixels_agent_pos"
+ render_mode: str = "rgb_array"
+ visualization_width: int = 384
+ visualization_height: int = 384
+ features: dict[str, PolicyFeature] = field(
+ default_factory=lambda: {
+ "action": PolicyFeature(type=FeatureType.ACTION, shape=(2,)),
+ "agent_pos": PolicyFeature(type=FeatureType.STATE, shape=(2,)),
+ }
+ )
+ features_map: dict[str, str] = field(
+ default_factory=lambda: {
+ "action": ACTION,
+ "agent_pos": OBS_STATE,
+ "environment_state": OBS_ENV_STATE,
+ "pixels": OBS_IMAGE,
+ }
+ )
+
+ def __post_init__(self):
+ if self.obs_type == "pixels_agent_pos":
+ self.features["pixels"] = PolicyFeature(type=FeatureType.VISUAL, shape=(384, 384, 3))
+ elif self.obs_type == "environment_state_agent_pos":
+ self.features["environment_state"] = PolicyFeature(type=FeatureType.ENV, shape=(16,))
+
+ @property
+ def gym_kwargs(self) -> dict:
+ return {
+ "obs_type": self.obs_type,
+ "render_mode": self.render_mode,
+ "visualization_width": self.visualization_width,
+ "visualization_height": self.visualization_height,
+ "max_episode_steps": self.episode_length,
+ }
+
+
+@EnvConfig.register_subclass("xarm")
+@dataclass
+class XarmEnv(EnvConfig):
+ task: str = "XarmLift-v0"
+ fps: int = 15
+ episode_length: int = 200
+ obs_type: str = "pixels_agent_pos"
+ render_mode: str = "rgb_array"
+ visualization_width: int = 384
+ visualization_height: int = 384
+ features: dict[str, PolicyFeature] = field(
+ default_factory=lambda: {
+ "action": PolicyFeature(type=FeatureType.ACTION, shape=(4,)),
+ "pixels": PolicyFeature(type=FeatureType.VISUAL, shape=(84, 84, 3)),
+ }
+ )
+ features_map: dict[str, str] = field(
+ default_factory=lambda: {
+ "action": ACTION,
+ "agent_pos": OBS_STATE,
+ "pixels": OBS_IMAGE,
+ }
+ )
+
+ def __post_init__(self):
+ if self.obs_type == "pixels_agent_pos":
+ self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(4,))
+
+ @property
+ def gym_kwargs(self) -> dict:
+ return {
+ "obs_type": self.obs_type,
+ "render_mode": self.render_mode,
+ "visualization_width": self.visualization_width,
+ "visualization_height": self.visualization_height,
+ "max_episode_steps": self.episode_length,
+ }
+
+
+@dataclass
+class VideoRecordConfig:
+ """Configuration for video recording in ManiSkill environments."""
+
+ enabled: bool = False
+ record_dir: str = "videos"
+ trajectory_name: str = "trajectory"
+
+
+@dataclass
+class EnvTransformConfig:
+ """Configuration for environment wrappers."""
+
+ # ee_action_space_params: EEActionSpaceConfig = field(default_factory=EEActionSpaceConfig)
+ control_mode: str = "gamepad"
+ display_cameras: bool = False
+ add_joint_velocity_to_observation: bool = False
+ add_current_to_observation: bool = False
+ add_ee_pose_to_observation: bool = False
+ crop_params_dict: dict[str, tuple[int, int, int, int]] | None = None
+ resize_size: tuple[int, int] | None = None
+ control_time_s: float = 20.0
+ fixed_reset_joint_positions: Any | None = None
+ reset_time_s: float = 5.0
+ use_gripper: bool = True
+ gripper_quantization_threshold: float | None = 0.8
+ gripper_penalty: float = 0.0
+ gripper_penalty_in_reward: bool = False
+
+
+@EnvConfig.register_subclass(name="gym_manipulator")
+@dataclass
+class HILSerlRobotEnvConfig(EnvConfig):
+ """Configuration for the HILSerlRobotEnv environment."""
+
+ robot: RobotConfig | None = None
+ teleop: TeleoperatorConfig | None = None
+ wrapper: EnvTransformConfig | None = None
+ fps: int = 10
+ name: str = "real_robot"
+ mode: str | None = None # Either "record", "replay", None
+ repo_id: str | None = None
+ dataset_root: str | None = None
+ task: str = ""
+ num_episodes: int = 10 # only for record mode
+ episode: int = 0
+ device: str = "cuda"
+ push_to_hub: bool = True
+ pretrained_policy_name_or_path: str | None = None
+ reward_classifier_pretrained_path: str | None = None
+ # For the reward classifier, to record more positive examples after a success
+ number_of_steps_after_success: int = 0
+
+ def gym_kwargs(self) -> dict:
+ return {}
+
+
+@EnvConfig.register_subclass("hil")
+@dataclass
+class HILEnvConfig(EnvConfig):
+ """Configuration for the HIL environment."""
+
+ type: str = "hil"
+ name: str = "PandaPickCube"
+ task: str = "PandaPickCubeKeyboard-v0"
+ use_viewer: bool = True
+ gripper_penalty: float = 0.0
+ use_gamepad: bool = True
+ state_dim: int = 18
+ action_dim: int = 4
+ fps: int = 100
+ episode_length: int = 100
+ video_record: VideoRecordConfig = field(default_factory=VideoRecordConfig)
+ features: dict[str, PolicyFeature] = field(
+ default_factory=lambda: {
+ "action": PolicyFeature(type=FeatureType.ACTION, shape=(4,)),
+ "observation.image": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)),
+ "observation.state": PolicyFeature(type=FeatureType.STATE, shape=(18,)),
+ }
+ )
+ features_map: dict[str, str] = field(
+ default_factory=lambda: {
+ "action": ACTION,
+ "observation.image": OBS_IMAGE,
+ "observation.state": OBS_STATE,
+ }
+ )
+ ################# args from hilserlrobotenv
+ reward_classifier_pretrained_path: str | None = None
+ robot_config: RobotConfig | None = None
+ teleop_config: TeleoperatorConfig | None = None
+ wrapper: EnvTransformConfig | None = None
+ mode: str | None = None # Either "record", "replay", None
+ repo_id: str | None = None
+ dataset_root: str | None = None
+ num_episodes: int = 10 # only for record mode
+ episode: int = 0
+ device: str = "cuda"
+ push_to_hub: bool = True
+ pretrained_policy_name_or_path: str | None = None
+ # For the reward classifier, to record more positive examples after a success
+ number_of_steps_after_success: int = 0
+ ############################
+
+ @property
+ def gym_kwargs(self) -> dict:
+ return {
+ "use_viewer": self.use_viewer,
+ "use_gamepad": self.use_gamepad,
+ "gripper_penalty": self.gripper_penalty,
+ }
diff --git a/src/lerobot/envs/factory.py b/src/lerobot/envs/factory.py
new file mode 100644
index 0000000000..dc6d96d61a
--- /dev/null
+++ b/src/lerobot/envs/factory.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+
+import gymnasium as gym
+
+from lerobot.envs.configs import AlohaEnv, EnvConfig, HILEnvConfig, PushtEnv, XarmEnv
+
+
+def make_env_config(env_type: str, **kwargs) -> EnvConfig:
+ if env_type == "aloha":
+ return AlohaEnv(**kwargs)
+ elif env_type == "pusht":
+ return PushtEnv(**kwargs)
+ elif env_type == "xarm":
+ return XarmEnv(**kwargs)
+ elif env_type == "hil":
+ return HILEnvConfig(**kwargs)
+ else:
+ raise ValueError(f"Policy type '{env_type}' is not available.")
+
+
+def make_env(cfg: EnvConfig, n_envs: int = 1, use_async_envs: bool = False) -> gym.vector.VectorEnv | None:
+ """Makes a gym vector environment according to the config.
+
+ Args:
+ cfg (EnvConfig): the config of the environment to instantiate.
+ n_envs (int, optional): The number of parallelized env to return. Defaults to 1.
+ use_async_envs (bool, optional): Whether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to
+ False.
+
+ Raises:
+ ValueError: if n_envs < 1
+ ModuleNotFoundError: If the requested env package is not installed
+
+ Returns:
+ gym.vector.VectorEnv: The parallelized gym.env instance.
+ """
+ if n_envs < 1:
+ raise ValueError("`n_envs` must be at least 1")
+
+ package_name = f"gym_{cfg.type}"
+
+ try:
+ importlib.import_module(package_name)
+ except ModuleNotFoundError as e:
+ print(f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.type}]'`")
+ raise e
+
+ gym_handle = f"{package_name}/{cfg.task}"
+
+ # batched version of the env that returns an observation of shape (b, c)
+ env_cls = gym.vector.AsyncVectorEnv if use_async_envs else gym.vector.SyncVectorEnv
+ env = env_cls(
+ [lambda: gym.make(gym_handle, disable_env_checker=True, **cfg.gym_kwargs) for _ in range(n_envs)]
+ )
+
+ return env
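+
+
+# A minimal usage sketch (requires the matching gym package, e.g. `gym-pusht`, to be installed):
+#
+#   cfg = make_env_config("pusht")
+#   envs = make_env(cfg, n_envs=2)
+#   obs, info = envs.reset()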
diff --git a/src/lerobot/envs/utils.py b/src/lerobot/envs/utils.py
new file mode 100644
index 0000000000..00676a0110
--- /dev/null
+++ b/src/lerobot/envs/utils.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import warnings
+from typing import Any
+
+import einops
+import gymnasium as gym
+import numpy as np
+import torch
+from torch import Tensor
+
+from lerobot.configs.types import FeatureType, PolicyFeature
+from lerobot.envs.configs import EnvConfig
+from lerobot.utils.utils import get_channel_first_image_shape
+
+
+def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]:
+ # TODO(aliberts, rcadene): refactor this to use features from the environment (no hardcoding)
+ """Convert environment observation to LeRobot format observation.
+ Args:
+ observation: Dictionary of observation batches from a Gym vector environment.
+ Returns:
+ Dictionary of observation batches with keys renamed to LeRobot format and values as tensors.
+ """
+ # map to expected inputs for the policy
+ return_observations = {}
+ if "pixels" in observations:
+ if isinstance(observations["pixels"], dict):
+ imgs = {f"observation.images.{key}": img for key, img in observations["pixels"].items()}
+ else:
+ imgs = {"observation.image": observations["pixels"]}
+
+ for imgkey, img in imgs.items():
+ # TODO(aliberts, rcadene): use transforms.ToTensor()?
+ img = torch.from_numpy(img)
+
+ # When preprocessing observations in a non-vectorized environment, we need to add a batch dimension.
+ # This is the case for human-in-the-loop RL where there is only one environment.
+ if img.ndim == 3:
+ img = img.unsqueeze(0)
+ # sanity check that images are channel last
+ _, h, w, c = img.shape
+ assert c < h and c < w, f"expect channel last images, but instead got {img.shape=}"
+
+ # sanity check that images are uint8
+ assert img.dtype == torch.uint8, f"expect torch.uint8, but instead {img.dtype=}"
+
+ # convert to channel first of type float32 in range [0,1]
+ img = einops.rearrange(img, "b h w c -> b c h w").contiguous()
+ img = img.type(torch.float32)
+ img /= 255
+
+ return_observations[imgkey] = img
+
+ if "environment_state" in observations:
+ env_state = torch.from_numpy(observations["environment_state"]).float()
+ if env_state.dim() == 1:
+ env_state = env_state.unsqueeze(0)
+
+ return_observations["observation.environment_state"] = env_state
+
+ # TODO(rcadene): enable pixels only baseline with `obs_type="pixels"` in environment by removing
+ agent_pos = torch.from_numpy(observations["agent_pos"]).float()
+ if agent_pos.dim() == 1:
+ agent_pos = agent_pos.unsqueeze(0)
+ return_observations["observation.state"] = agent_pos
+
+ return return_observations
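+
+
+# A minimal usage sketch (the observation dict below mimics what a vectorized PushT env returns;
+# the shapes are illustrative):
+#
+#   obs = {"pixels": np.zeros((2, 384, 384, 3), dtype=np.uint8), "agent_pos": np.zeros((2, 2))}
+#   batch = preprocess_observation(obs)
+#   # batch["observation.image"]: float32, shape (2, 3, 384, 384); batch["observation.state"]: shape (2, 2)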
+
+
+def env_to_policy_features(env_cfg: EnvConfig) -> dict[str, PolicyFeature]:
+ # TODO(aliberts, rcadene): remove this hardcoding of keys and just use the nested keys as is
+ # (need to also refactor preprocess_observation and externalize normalization from policies)
+ policy_features = {}
+ for key, ft in env_cfg.features.items():
+ if ft.type is FeatureType.VISUAL:
+ if len(ft.shape) != 3:
+ raise ValueError(f"Number of dimensions of {key} != 3 (shape={ft.shape})")
+
+ shape = get_channel_first_image_shape(ft.shape)
+ feature = PolicyFeature(type=ft.type, shape=shape)
+ else:
+ feature = ft
+
+ policy_key = env_cfg.features_map[key]
+ policy_features[policy_key] = feature
+
+ return policy_features
+
+
+def are_all_envs_same_type(env: gym.vector.VectorEnv) -> bool:
+ first_type = type(env.envs[0]) # Get type of first env
+ return all(type(e) is first_type for e in env.envs) # Fast type check
+
+
+def check_env_attributes_and_types(env: gym.vector.VectorEnv) -> None:
+ with warnings.catch_warnings():
+ warnings.simplefilter("once", UserWarning) # Apply filter only in this function
+
+ if not (hasattr(env.envs[0], "task_description") and hasattr(env.envs[0], "task")):
+ warnings.warn(
+ "The environment does not have 'task_description' and 'task'. Some policies require these features.",
+ UserWarning,
+ stacklevel=2,
+ )
+ if not are_all_envs_same_type(env):
+ warnings.warn(
+ "The environments have different types. Make sure you infer the right task from each environment. Empty task will be passed instead.",
+ UserWarning,
+ stacklevel=2,
+ )
+
+
+def add_envs_task(env: gym.vector.VectorEnv, observation: dict[str, Any]) -> dict[str, Any]:
+ """Adds task feature to the observation dict with respect to the first environment attribute."""
+ if hasattr(env.envs[0], "task_description"):
+ observation["task"] = env.call("task_description")
+ elif hasattr(env.envs[0], "task"):
+ observation["task"] = env.call("task")
+ else: # For envs without language instructions, e.g. aloha transfer cube, etc.
+ num_envs = observation[list(observation.keys())[0]].shape[0]
+ observation["task"] = ["" for _ in range(num_envs)]
+ return observation
diff --git a/src/lerobot/errors.py b/src/lerobot/errors.py
new file mode 100644
index 0000000000..c02d568d44
--- /dev/null
+++ b/src/lerobot/errors.py
@@ -0,0 +1,43 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class DeviceNotConnectedError(ConnectionError):
+ """Exception raised when the device is not connected."""
+
+ def __init__(self, message="This device is not connected. Try calling `connect()` first."):
+ self.message = message
+ super().__init__(self.message)
+
+
+class DeviceAlreadyConnectedError(ConnectionError):
+ """Exception raised when the device is already connected."""
+
+ def __init__(
+ self,
+ message="This device is already connected. Try not calling `connect()` twice.",
+ ):
+ self.message = message
+ super().__init__(self.message)
+
+
+class InvalidActionError(ValueError):
+ """Exception raised when an action is already invalid."""
+
+ def __init__(
+ self,
+ message="The action is invalid. Check the value follows what it is expected from the action space.",
+ ):
+ self.message = message
+ super().__init__(self.message)
diff --git a/src/lerobot/find_cameras.py b/src/lerobot/find_cameras.py
new file mode 100644
index 0000000000..be8f272eee
--- /dev/null
+++ b/src/lerobot/find_cameras.py
@@ -0,0 +1,315 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Helper to find the camera devices available in your system.
+
+Example:
+
+```shell
+python -m lerobot.find_cameras
+```
+"""
+
+# NOTE(Steven): RealSense cameras can also be identified/opened as OpenCV cameras. If you know the camera is a RealSense, pass `realsense` to `lerobot.find_cameras` to avoid confusion.
+# NOTE(Steven): macOS cameras sometimes report a different FPS at init time. This is not an issue here since we don't specify the FPS when opening the cameras, but the displayed information might not be accurate.
+
+import argparse
+import concurrent.futures
+import logging
+import time
+from pathlib import Path
+from typing import Any
+
+import numpy as np
+from PIL import Image
+
+from lerobot.cameras.configs import ColorMode
+from lerobot.cameras.opencv.camera_opencv import OpenCVCamera
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.cameras.realsense.camera_realsense import RealSenseCamera
+from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig
+
+logger = logging.getLogger(__name__)
+
+
+def find_all_opencv_cameras() -> list[dict[str, Any]]:
+ """
+ Finds all available OpenCV cameras plugged into the system.
+
+ Returns:
+ A list of all available OpenCV cameras with their metadata.
+ """
+ all_opencv_cameras_info: list[dict[str, Any]] = []
+ logger.info("Searching for OpenCV cameras...")
+ try:
+ opencv_cameras = OpenCVCamera.find_cameras()
+ for cam_info in opencv_cameras:
+ all_opencv_cameras_info.append(cam_info)
+ logger.info(f"Found {len(opencv_cameras)} OpenCV cameras.")
+ except Exception as e:
+ logger.error(f"Error finding OpenCV cameras: {e}")
+
+ return all_opencv_cameras_info
+
+
+def find_all_realsense_cameras() -> list[dict[str, Any]]:
+ """
+ Finds all available RealSense cameras plugged into the system.
+
+ Returns:
+ A list of all available RealSense cameras with their metadata.
+ """
+ all_realsense_cameras_info: list[dict[str, Any]] = []
+ logger.info("Searching for RealSense cameras...")
+ try:
+ realsense_cameras = RealSenseCamera.find_cameras()
+ for cam_info in realsense_cameras:
+ all_realsense_cameras_info.append(cam_info)
+ logger.info(f"Found {len(realsense_cameras)} RealSense cameras.")
+ except ImportError:
+ logger.warning("Skipping RealSense camera search: pyrealsense2 library not found or not importable.")
+ except Exception as e:
+ logger.error(f"Error finding RealSense cameras: {e}")
+
+ return all_realsense_cameras_info
+
+
+def find_and_print_cameras(camera_type_filter: str | None = None) -> list[dict[str, Any]]:
+ """
+ Finds available cameras based on an optional filter and prints their information.
+
+ Args:
+ camera_type_filter: Optional string to filter cameras ("realsense" or "opencv").
+ If None, lists all cameras.
+
+ Returns:
+ A list of all available cameras matching the filter, with their metadata.
+ """
+ all_cameras_info: list[dict[str, Any]] = []
+
+ if camera_type_filter:
+ camera_type_filter = camera_type_filter.lower()
+
+ if camera_type_filter is None or camera_type_filter == "opencv":
+ all_cameras_info.extend(find_all_opencv_cameras())
+ if camera_type_filter is None or camera_type_filter == "realsense":
+ all_cameras_info.extend(find_all_realsense_cameras())
+
+ if not all_cameras_info:
+ if camera_type_filter:
+ logger.warning(f"No {camera_type_filter} cameras were detected.")
+ else:
+ logger.warning("No cameras (OpenCV or RealSense) were detected.")
+ else:
+ print("\n--- Detected Cameras ---")
+ for i, cam_info in enumerate(all_cameras_info):
+ print(f"Camera #{i}:")
+ for key, value in cam_info.items():
+ if key == "default_stream_profile" and isinstance(value, dict):
+ print(f" {key.replace('_', ' ').capitalize()}:")
+ for sub_key, sub_value in value.items():
+ print(f" {sub_key.capitalize()}: {sub_value}")
+ else:
+ print(f" {key.replace('_', ' ').capitalize()}: {value}")
+ print("-" * 20)
+ return all_cameras_info
+
+
+def save_image(
+ img_array: np.ndarray,
+ camera_identifier: str | int,
+ images_dir: Path,
+ camera_type: str,
+):
+ """
+ Saves a single image to disk using Pillow. Handles color conversion if necessary.
+ """
+ try:
+ img = Image.fromarray(img_array, mode="RGB")
+
+ safe_identifier = str(camera_identifier).replace("/", "_").replace("\\", "_")
+ filename_prefix = f"{camera_type.lower()}_{safe_identifier}"
+ filename = f"{filename_prefix}.png"
+
+ path = images_dir / filename
+ path.parent.mkdir(parents=True, exist_ok=True)
+ img.save(str(path))
+ logger.info(f"Saved image: {path}")
+ except Exception as e:
+ logger.error(f"Failed to save image for camera {camera_identifier} (type {camera_type}): {e}")
+
+
+def create_camera_instance(cam_meta: dict[str, Any]) -> dict[str, Any] | None:
+ """Create and connect to a camera instance based on metadata."""
+ cam_type = cam_meta.get("type")
+ cam_id = cam_meta.get("id")
+ instance = None
+
+ logger.info(f"Preparing {cam_type} ID {cam_id} with default profile")
+
+ try:
+ if cam_type == "OpenCV":
+ cv_config = OpenCVCameraConfig(
+ index_or_path=cam_id,
+ color_mode=ColorMode.RGB,
+ )
+ instance = OpenCVCamera(cv_config)
+ elif cam_type == "RealSense":
+ rs_config = RealSenseCameraConfig(
+ serial_number_or_name=cam_id,
+ color_mode=ColorMode.RGB,
+ )
+ instance = RealSenseCamera(rs_config)
+ else:
+ logger.warning(f"Unknown camera type: {cam_type} for ID {cam_id}. Skipping.")
+ return None
+
+ if instance:
+ logger.info(f"Connecting to {cam_type} camera: {cam_id}...")
+ instance.connect(warmup=False)
+ return {"instance": instance, "meta": cam_meta}
+ except Exception as e:
+ logger.error(f"Failed to connect or configure {cam_type} camera {cam_id}: {e}")
+ if instance and instance.is_connected:
+ instance.disconnect()
+ return None
+
+
+def process_camera_image(
+ cam_dict: dict[str, Any], executor: concurrent.futures.ThreadPoolExecutor, output_dir: Path, current_time: float
+) -> concurrent.futures.Future | None:
+ """Capture and process an image from a single camera."""
+ cam = cam_dict["instance"]
+ meta = cam_dict["meta"]
+ cam_type_str = str(meta.get("type", "unknown"))
+ cam_id_str = str(meta.get("id", "unknown"))
+
+ try:
+ image_data = cam.read()
+
+ # Submit the save to the executor so disk I/O doesn't block reads from the other cameras
+ return executor.submit(
+ save_image,
+ image_data,
+ cam_id_str,
+ output_dir,
+ cam_type_str,
+ )
+ except TimeoutError:
+ logger.warning(
+ f"Timeout reading from {cam_type_str} camera {cam_id_str} at time {current_time:.2f}s."
+ )
+ except Exception as e:
+ logger.error(f"Error reading from {cam_type_str} camera {cam_id_str}: {e}")
+ return None
+
+
+def cleanup_cameras(cameras_to_use: list[dict[str, Any]]):
+ """Disconnect all cameras."""
+ logger.info(f"Disconnecting {len(cameras_to_use)} cameras...")
+ for cam_dict in cameras_to_use:
+ try:
+ if cam_dict["instance"] and cam_dict["instance"].is_connected:
+ cam_dict["instance"].disconnect()
+ except Exception as e:
+ logger.error(f"Error disconnecting camera {cam_dict['meta'].get('id')}: {e}")
+
+
+def save_images_from_all_cameras(
+ output_dir: Path,
+ record_time_s: float = 2.0,
+ camera_type: str | None = None,
+):
+ """
+ Connects to detected cameras (optionally filtered by type) and saves images from each.
+ Uses default stream profiles for width, height, and FPS.
+
+ Args:
+ output_dir: Directory to save images.
+ record_time_s: Duration in seconds to record images.
+ camera_type: Optional string to filter cameras ("realsense" or "opencv").
+ If None, uses all detected cameras.
+ """
+ output_dir.mkdir(parents=True, exist_ok=True)
+ logger.info(f"Saving images to {output_dir}")
+ all_camera_metadata = find_and_print_cameras(camera_type_filter=camera_type)
+
+ if not all_camera_metadata:
+ logger.warning("No cameras detected matching the criteria. Cannot save images.")
+ return
+
+ cameras_to_use = []
+ for cam_meta in all_camera_metadata:
+ camera_instance = create_camera_instance(cam_meta)
+ if camera_instance:
+ cameras_to_use.append(camera_instance)
+
+ if not cameras_to_use:
+ logger.warning("No cameras could be connected. Aborting image save.")
+ return
+
+ logger.info(f"Starting image capture for {record_time_s} seconds from {len(cameras_to_use)} cameras.")
+ start_time = time.perf_counter()
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=len(cameras_to_use) * 2) as executor:
+ try:
+ while time.perf_counter() - start_time < record_time_s:
+ futures = []
+ current_capture_time = time.perf_counter()
+
+ for cam_dict in cameras_to_use:
+ future = process_camera_image(cam_dict, executor, output_dir, current_capture_time)
+ if future:
+ futures.append(future)
+
+ if futures:
+ concurrent.futures.wait(futures)
+
+ except KeyboardInterrupt:
+ logger.info("Capture interrupted by user.")
+ finally:
+ print("\nFinalizing image saving...")
+ executor.shutdown(wait=True)
+ cleanup_cameras(cameras_to_use)
+ print(f"Image capture finished. Images saved to {output_dir}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Unified camera utility script for listing cameras and capturing images."
+ )
+
+ parser.add_argument(
+ "camera_type",
+ type=str,
+ nargs="?",
+ default=None,
+ choices=["realsense", "opencv"],
+ help="Specify camera type to capture from (e.g., 'realsense', 'opencv'). Captures from all if omitted.",
+ )
+ parser.add_argument(
+ "--output-dir",
+ type=Path,
+ default="outputs/captured_images",
+ help="Directory to save images. Default: outputs/captured_images",
+ )
+ parser.add_argument(
+ "--record-time-s",
+ type=float,
+ default=6.0,
+ help="Time duration to attempt capturing frames. Default: 6 seconds.",
+ )
+ args = parser.parse_args()
+ save_images_from_all_cameras(**vars(args))
diff --git a/src/lerobot/find_port.py b/src/lerobot/find_port.py
new file mode 100644
index 0000000000..cf02825073
--- /dev/null
+++ b/src/lerobot/find_port.py
@@ -0,0 +1,65 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Helper to find the USB port associated with your MotorsBus.
+
+Example:
+
+```shell
+python -m lerobot.find_port
+```
+"""
+
+import platform
+import time
+from pathlib import Path
+
+
+def find_available_ports():
+ from serial.tools import list_ports # Part of pyserial library
+
+ if platform.system() == "Windows":
+ # List COM ports using pyserial
+ ports = [port.device for port in list_ports.comports()]
+ else: # Linux/macOS
+ # List /dev/tty* ports for Unix-based systems
+ ports = [str(path) for path in Path("/dev").glob("tty*")]
+ return ports
+
+
+def find_port():
+ print("Finding all available ports for the MotorsBus.")
+ ports_before = find_available_ports()
+ print("Ports before disconnecting:", ports_before)
+
+ print("Remove the USB cable from your MotorsBus and press Enter when done.")
+ input() # Wait for user to disconnect the device
+
+ time.sleep(0.5) # Allow some time for port to be released
+ ports_after = find_available_ports()
+ ports_diff = list(set(ports_before) - set(ports_after))
+
+ if len(ports_diff) == 1:
+ port = ports_diff[0]
+ print(f"The port of this MotorsBus is '{port}'")
+ print("Reconnect the USB cable.")
+ elif len(ports_diff) == 0:
+ raise OSError(f"Could not detect the port. No difference was found ({ports_diff}).")
+ else:
+ raise OSError(f"Could not detect the port. More than one port was found ({ports_diff}).")
+
+
+if __name__ == "__main__":
+ find_port()
diff --git a/src/lerobot/model/kinematics.py b/src/lerobot/model/kinematics.py
new file mode 100644
index 0000000000..f059b97907
--- /dev/null
+++ b/src/lerobot/model/kinematics.py
@@ -0,0 +1,128 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+
+class RobotKinematics:
+ """Robot kinematics using placo library for forward and inverse kinematics."""
+
+ def __init__(
+ self,
+ urdf_path: str,
+ target_frame_name: str = "gripper_frame_link",
+ joint_names: list[str] | None = None,
+ ):
+ """
+ Initialize placo-based kinematics solver.
+
+ Args:
+ urdf_path: Path to the robot URDF file
+ target_frame_name: Name of the end-effector frame in the URDF
+ joint_names: List of joint names to use for the kinematics solver
+ """
+ try:
+ import placo
+ except ImportError as e:
+ raise ImportError(
+ "placo is required for RobotKinematics. "
+ "Please install the optional dependencies of `kinematics` in the package."
+ ) from e
+
+ self.robot = placo.RobotWrapper(urdf_path)
+ self.solver = placo.KinematicsSolver(self.robot)
+ self.solver.mask_fbase(True) # Fix the base
+
+ self.target_frame_name = target_frame_name
+
+ # Set joint names
+ self.joint_names = list(self.robot.joint_names()) if joint_names is None else joint_names
+
+ # Initialize frame task for IK
+ self.tip_frame = self.solver.add_frame_task(self.target_frame_name, np.eye(4))
+
+ def forward_kinematics(self, joint_pos_deg):
+ """
+ Compute forward kinematics for the given joint configuration, using the target frame set in the constructor.
+
+ Args:
+ joint_pos_deg: Joint positions in degrees (numpy array)
+
+ Returns:
+ 4x4 transformation matrix of the end-effector pose
+ """
+
+ # Convert degrees to radians
+ joint_pos_rad = np.deg2rad(joint_pos_deg[: len(self.joint_names)])
+
+ # Update joint positions in placo robot
+ for i, joint_name in enumerate(self.joint_names):
+ self.robot.set_joint(joint_name, joint_pos_rad[i])
+
+ # Update kinematics
+ self.robot.update_kinematics()
+
+ # Get the transformation matrix
+ return self.robot.get_T_world_frame(self.target_frame_name)
+
+ def inverse_kinematics(
+ self, current_joint_pos, desired_ee_pose, position_weight=1.0, orientation_weight=0.01
+ ):
+ """
+ Compute inverse kinematics using placo solver.
+
+ Args:
+ current_joint_pos: Current joint positions in degrees (used as initial guess)
+ desired_ee_pose: Target end-effector pose as a 4x4 transformation matrix
+ position_weight: Weight for position constraint in IK
+ orientation_weight: Weight for orientation constraint in IK, set to 0.0 to only constrain position
+
+ Returns:
+ Joint positions in degrees that achieve the desired end-effector pose
+ """
+
+ # Convert current joint positions to radians for initial guess
+ current_joint_rad = np.deg2rad(current_joint_pos[: len(self.joint_names)])
+
+ # Set current joint positions as initial guess
+ for i, joint_name in enumerate(self.joint_names):
+ self.robot.set_joint(joint_name, current_joint_rad[i])
+
+ # Update the target pose for the frame task
+ self.tip_frame.T_world_frame = desired_ee_pose
+
+ # Configure the frame task with the given position and orientation weights
+ self.tip_frame.configure(self.target_frame_name, "soft", position_weight, orientation_weight)
+
+ # Solve IK
+ self.solver.solve(True)
+ self.robot.update_kinematics()
+
+ # Extract joint positions
+ joint_pos_rad = []
+ for joint_name in self.joint_names:
+ joint = self.robot.get_joint(joint_name)
+ joint_pos_rad.append(joint)
+
+ # Convert back to degrees
+ joint_pos_deg = np.rad2deg(joint_pos_rad)
+
+ # Preserve gripper position if present in current_joint_pos
+ if len(current_joint_pos) > len(self.joint_names):
+ result = np.zeros_like(current_joint_pos)
+ result[: len(self.joint_names)] = joint_pos_deg
+ result[len(self.joint_names) :] = current_joint_pos[len(self.joint_names) :]
+ return result
+ else:
+ return joint_pos_deg
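+
+
+# A minimal usage sketch (the URDF path, frame name and joint values are purely illustrative):
+#
+#   kin = RobotKinematics("so_arm.urdf", target_frame_name="gripper_frame_link")
+#   ee_pose = kin.forward_kinematics(np.array([0.0, 10.0, -10.0, 0.0, 0.0]))  # degrees
+#   joints_deg = kin.inverse_kinematics(np.array([0.0, 10.0, -10.0, 0.0, 0.0]), ee_pose)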
diff --git a/src/lerobot/motors/__init__.py b/src/lerobot/motors/__init__.py
new file mode 100644
index 0000000000..dfbfbaee8f
--- /dev/null
+++ b/src/lerobot/motors/__init__.py
@@ -0,0 +1 @@
+from .motors_bus import Motor, MotorCalibration, MotorNormMode, MotorsBus
diff --git a/src/lerobot/motors/calibration_gui.py b/src/lerobot/motors/calibration_gui.py
new file mode 100644
index 0000000000..9832a1636b
--- /dev/null
+++ b/src/lerobot/motors/calibration_gui.py
@@ -0,0 +1,401 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import os
+from dataclasses import dataclass
+
+os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"
+
+from lerobot.motors import MotorCalibration, MotorsBus
+
+BAR_LEN, BAR_THICKNESS = 450, 8
+HANDLE_R = 10
+BRACKET_W, BRACKET_H = 6, 14
+TRI_W, TRI_H = 12, 14
+
+BTN_W, BTN_H = 60, 22
+SAVE_W, SAVE_H = 80, 28
+LOAD_W = 80
+DD_W, DD_H = 160, 28
+
+TOP_GAP = 50
+PADDING_Y, TOP_OFFSET = 70, 60
+FONT_SIZE, FPS = 20, 60
+
+BG_COLOR = (30, 30, 30)
+BAR_RED, BAR_GREEN = (200, 60, 60), (60, 200, 60)
+HANDLE_COLOR, TEXT_COLOR = (240, 240, 240), (250, 250, 250)
+TICK_COLOR = (250, 220, 40)
+BTN_COLOR, BTN_COLOR_HL = (80, 80, 80), (110, 110, 110)
+DD_COLOR, DD_COLOR_HL = (70, 70, 70), (100, 100, 100)
+
+
+def dist(a, b):
+ return math.hypot(a[0] - b[0], a[1] - b[1])
+
+
+@dataclass
+class RangeValues:
+ min_v: int
+ pos_v: int
+ max_v: int
+
+
+class RangeSlider:
+ """One motor = one slider row"""
+
+ def __init__(self, motor, idx, res, calibration, present, label_pad, base_y):
+ import pygame
+
+ self.motor = motor
+ self.res = res
+ self.x0 = 40 + label_pad
+ self.x1 = self.x0 + BAR_LEN
+ self.y = base_y + idx * PADDING_Y
+
+ self.min_v = calibration.range_min
+ self.max_v = calibration.range_max
+ self.pos_v = max(self.min_v, min(present, self.max_v))
+
+ self.min_x = self._pos_from_val(self.min_v)
+ self.max_x = self._pos_from_val(self.max_v)
+ self.pos_x = self._pos_from_val(self.pos_v)
+
+ self.min_btn = pygame.Rect(self.x0 - BTN_W - 6, self.y - BTN_H // 2, BTN_W, BTN_H)
+ self.max_btn = pygame.Rect(self.x1 + 6, self.y - BTN_H // 2, BTN_W, BTN_H)
+
+ self.drag_min = self.drag_max = self.drag_pos = False
+ self.tick_val = present
+ self.font = pygame.font.Font(None, FONT_SIZE)
+
+ def _val_from_pos(self, x):
+ return round((x - self.x0) / BAR_LEN * self.res)
+
+ def _pos_from_val(self, v):
+ return self.x0 + (v / self.res) * BAR_LEN
+
+ def set_tick(self, v):
+ self.tick_val = max(0, min(v, self.res))
+
+ def _triangle_hit(self, pos):
+ import pygame
+
+ tri_top = self.y - BAR_THICKNESS // 2 - 2
+ return pygame.Rect(self.pos_x - TRI_W // 2, tri_top - TRI_H, TRI_W, TRI_H).collidepoint(pos)
+
+ def handle_event(self, e):
+ import pygame
+
+ if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
+ if self.min_btn.collidepoint(e.pos):
+ self.min_x, self.min_v = self.pos_x, self.pos_v
+ return
+ if self.max_btn.collidepoint(e.pos):
+ self.max_x, self.max_v = self.pos_x, self.pos_v
+ return
+ if dist(e.pos, (self.min_x, self.y)) <= HANDLE_R:
+ self.drag_min = True
+ elif dist(e.pos, (self.max_x, self.y)) <= HANDLE_R:
+ self.drag_max = True
+ elif self._triangle_hit(e.pos):
+ self.drag_pos = True
+
+ elif e.type == pygame.MOUSEBUTTONUP and e.button == 1:
+ self.drag_min = self.drag_max = self.drag_pos = False
+
+ elif e.type == pygame.MOUSEMOTION:
+ x = e.pos[0]
+ if self.drag_min:
+ self.min_x = max(self.x0, min(x, self.pos_x))
+ elif self.drag_max:
+ self.max_x = min(self.x1, max(x, self.pos_x))
+ elif self.drag_pos:
+ self.pos_x = max(self.min_x, min(x, self.max_x))
+
+ self.min_v = self._val_from_pos(self.min_x)
+ self.max_v = self._val_from_pos(self.max_x)
+ self.pos_v = self._val_from_pos(self.pos_x)
+
+ def _draw_button(self, surf, rect, text):
+ import pygame
+
+ clr = BTN_COLOR_HL if rect.collidepoint(pygame.mouse.get_pos()) else BTN_COLOR
+ pygame.draw.rect(surf, clr, rect, border_radius=4)
+ t = self.font.render(text, True, TEXT_COLOR)
+ surf.blit(t, (rect.centerx - t.get_width() // 2, rect.centery - t.get_height() // 2))
+
+ def draw(self, surf):
+ import pygame
+
+ # motor name above set-min button (right-aligned)
+ name_surf = self.font.render(self.motor, True, TEXT_COLOR)
+ surf.blit(
+ name_surf,
+ (self.min_btn.right - name_surf.get_width(), self.min_btn.y - name_surf.get_height() - 4),
+ )
+
+ # bar + active section
+ pygame.draw.rect(surf, BAR_RED, (self.x0, self.y - BAR_THICKNESS // 2, BAR_LEN, BAR_THICKNESS))
+ pygame.draw.rect(
+ surf, BAR_GREEN, (self.min_x, self.y - BAR_THICKNESS // 2, self.max_x - self.min_x, BAR_THICKNESS)
+ )
+
+ # tick
+ tick_x = self._pos_from_val(self.tick_val)
+ pygame.draw.line(
+ surf,
+ TICK_COLOR,
+ (tick_x, self.y - BAR_THICKNESS // 2 - 4),
+ (tick_x, self.y + BAR_THICKNESS // 2 + 4),
+ 2,
+ )
+
+ # brackets
+ for x, sign in ((self.min_x, +1), (self.max_x, -1)):
+ pygame.draw.line(
+ surf, HANDLE_COLOR, (x, self.y - BRACKET_H // 2), (x, self.y + BRACKET_H // 2), 2
+ )
+ pygame.draw.line(
+ surf,
+ HANDLE_COLOR,
+ (x, self.y - BRACKET_H // 2),
+ (x + sign * BRACKET_W, self.y - BRACKET_H // 2),
+ 2,
+ )
+ pygame.draw.line(
+ surf,
+ HANDLE_COLOR,
+ (x, self.y + BRACKET_H // 2),
+ (x + sign * BRACKET_W, self.y + BRACKET_H // 2),
+ 2,
+ )
+
+ # triangle ▼
+ tri_top = self.y - BAR_THICKNESS // 2 - 2
+ pygame.draw.polygon(
+ surf,
+ HANDLE_COLOR,
+ [
+ (self.pos_x, tri_top),
+ (self.pos_x - TRI_W // 2, tri_top - TRI_H),
+ (self.pos_x + TRI_W // 2, tri_top - TRI_H),
+ ],
+ )
+
+ # numeric labels
+ fh = self.font.get_height()
+ pos_y = tri_top - TRI_H - 4 - fh
+ txts = [
+ (self.min_v, self.min_x, self.y - BRACKET_H // 2 - 4 - fh),
+ (self.max_v, self.max_x, self.y - BRACKET_H // 2 - 4 - fh),
+ (self.pos_v, self.pos_x, pos_y),
+ ]
+ for v, x, y in txts:
+ s = self.font.render(str(v), True, TEXT_COLOR)
+ surf.blit(s, (x - s.get_width() // 2, y))
+
+ # buttons
+ self._draw_button(surf, self.min_btn, "set min")
+ self._draw_button(surf, self.max_btn, "set max")
+
+ # external
+ def values(self) -> RangeValues:
+ return RangeValues(self.min_v, self.pos_v, self.max_v)
+
+
+class RangeFinderGUI:
+ def __init__(self, bus: MotorsBus, groups: dict[str, list[str]] | None = None):
+ import pygame
+
+ self.bus = bus
+ self.groups = groups if groups is not None else {"all": list(bus.motors)}
+        self.group_names = list(self.groups)
+ self.current_group = self.group_names[0]
+
+ if not bus.is_connected:
+ bus.connect()
+
+ self.calibration = bus.read_calibration()
+ self.res_table = bus.model_resolution_table
+ self.present_cache = {
+            m: bus.read("Present_Position", m, normalize=False) for motors in self.groups.values() for m in motors
+ }
+
+ pygame.init()
+ self.font = pygame.font.Font(None, FONT_SIZE)
+
+        label_pad = max(self.font.size(m)[0] for ms in self.groups.values() for m in ms)
+ self.label_pad = label_pad
+ width = 40 + label_pad + BAR_LEN + 6 + BTN_W + 10 + SAVE_W + 10
+ self.controls_bottom = 10 + SAVE_H
+ self.base_y = self.controls_bottom + TOP_GAP
+        height = self.base_y + PADDING_Y * len(self.groups[self.current_group]) + 40
+
+ self.screen = pygame.display.set_mode((width, height))
+ pygame.display.set_caption("Motors range finder")
+
+ # ui rects
+ self.save_btn = pygame.Rect(width - SAVE_W - 10, 10, SAVE_W, SAVE_H)
+ self.load_btn = pygame.Rect(self.save_btn.left - LOAD_W - 10, 10, LOAD_W, SAVE_H)
+ self.dd_btn = pygame.Rect(width // 2 - DD_W // 2, 10, DD_W, DD_H)
+ self.dd_open = False # dropdown expanded?
+
+ self.clock = pygame.time.Clock()
+ self._build_sliders()
+ self._adjust_height()
+
+ def _adjust_height(self):
+ import pygame
+
+ motors = self.groups[self.current_group]
+ new_h = self.base_y + PADDING_Y * len(motors) + 40
+ if new_h != self.screen.get_height():
+ w = self.screen.get_width()
+ self.screen = pygame.display.set_mode((w, new_h))
+
+ def _build_sliders(self):
+ self.sliders: list[RangeSlider] = []
+ motors = self.groups[self.current_group]
+ for i, m in enumerate(motors):
+ self.sliders.append(
+ RangeSlider(
+ motor=m,
+ idx=i,
+ res=self.res_table[self.bus.motors[m].model] - 1,
+ calibration=self.calibration[m],
+ present=self.present_cache[m],
+ label_pad=self.label_pad,
+ base_y=self.base_y,
+ )
+ )
+
+ def _draw_dropdown(self):
+ import pygame
+
+ # collapsed box
+ hover = self.dd_btn.collidepoint(pygame.mouse.get_pos())
+ pygame.draw.rect(self.screen, DD_COLOR_HL if hover else DD_COLOR, self.dd_btn, border_radius=6)
+
+ txt = self.font.render(self.current_group, True, TEXT_COLOR)
+ self.screen.blit(
+ txt, (self.dd_btn.centerx - txt.get_width() // 2, self.dd_btn.centery - txt.get_height() // 2)
+ )
+
+ tri_w, tri_h = 12, 6
+ cx = self.dd_btn.right - 14
+ cy = self.dd_btn.centery + 1
+ pygame.draw.polygon(
+ self.screen,
+ TEXT_COLOR,
+ [(cx - tri_w // 2, cy - tri_h // 2), (cx + tri_w // 2, cy - tri_h // 2), (cx, cy + tri_h // 2)],
+ )
+
+ if not self.dd_open:
+ return
+
+ # expanded list
+ for i, name in enumerate(self.group_names):
+ item_rect = pygame.Rect(self.dd_btn.left, self.dd_btn.bottom + i * DD_H, DD_W, DD_H)
+ clr = DD_COLOR_HL if item_rect.collidepoint(pygame.mouse.get_pos()) else DD_COLOR
+ pygame.draw.rect(self.screen, clr, item_rect)
+ t = self.font.render(name, True, TEXT_COLOR)
+ self.screen.blit(
+ t, (item_rect.centerx - t.get_width() // 2, item_rect.centery - t.get_height() // 2)
+ )
+
+ def _handle_dropdown_event(self, e):
+ import pygame
+
+ if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
+ if self.dd_btn.collidepoint(e.pos):
+ self.dd_open = not self.dd_open
+ return True
+ if self.dd_open:
+ for i, name in enumerate(self.group_names):
+ item_rect = pygame.Rect(self.dd_btn.left, self.dd_btn.bottom + i * DD_H, DD_W, DD_H)
+ if item_rect.collidepoint(e.pos):
+ if name != self.current_group:
+ self.current_group = name
+ self._build_sliders()
+ self._adjust_height()
+ self.dd_open = False
+ return True
+ self.dd_open = False
+ return False
+
+ def _save_current(self):
+ for s in self.sliders:
+ self.calibration[s.motor].range_min = s.min_v
+ self.calibration[s.motor].range_max = s.max_v
+
+ with self.bus.torque_disabled():
+ self.bus.write_calibration(self.calibration)
+
+ def _load_current(self):
+ self.calibration = self.bus.read_calibration()
+ for s in self.sliders:
+ s.min_v = self.calibration[s.motor].range_min
+ s.max_v = self.calibration[s.motor].range_max
+ s.min_x = s._pos_from_val(s.min_v)
+ s.max_x = s._pos_from_val(s.max_v)
+
+ def run(self) -> dict[str, MotorCalibration]:
+ import pygame
+
+ while True:
+ for e in pygame.event.get():
+ if e.type == pygame.QUIT:
+ pygame.quit()
+ return self.calibration
+
+ if self._handle_dropdown_event(e):
+ continue
+
+ if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
+ if self.save_btn.collidepoint(e.pos):
+ self._save_current()
+ elif self.load_btn.collidepoint(e.pos):
+ self._load_current()
+
+ for s in self.sliders:
+ s.handle_event(e)
+
+ # live goal write while dragging
+ for s in self.sliders:
+ if s.drag_pos:
+ self.bus.write("Goal_Position", s.motor, s.pos_v, normalize=False)
+
+ # tick update
+ for s in self.sliders:
+ pos = self.bus.read("Present_Position", s.motor, normalize=False)
+ s.set_tick(pos)
+ self.present_cache[s.motor] = pos
+
+ # ─ drawing
+ self.screen.fill(BG_COLOR)
+ for s in self.sliders:
+ s.draw(self.screen)
+
+ self._draw_dropdown()
+
+ # load / save buttons
+ for rect, text in ((self.load_btn, "LOAD"), (self.save_btn, "SAVE")):
+ clr = BTN_COLOR_HL if rect.collidepoint(pygame.mouse.get_pos()) else BTN_COLOR
+ pygame.draw.rect(self.screen, clr, rect, border_radius=6)
+ t = self.font.render(text, True, TEXT_COLOR)
+ self.screen.blit(t, (rect.centerx - t.get_width() // 2, rect.centery - t.get_height() // 2))
+
+ pygame.display.flip()
+ self.clock.tick(FPS)
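+
+
+if __name__ == "__main__":
+    # Usage sketch with a hypothetical port, motor id and grouping; any MotorsBus
+    # implementation (Feetech or Dynamixel) can be handed to the GUI.
+    from lerobot.motors import Motor, MotorNormMode
+    from lerobot.motors.feetech import FeetechMotorsBus
+
+    bus = FeetechMotorsBus(
+        port="/dev/ttyUSB0",
+        motors={"shoulder_pan": Motor(1, "sts3215", MotorNormMode.RANGE_M100_100)},
+    )
+    calibration = RangeFinderGUI(bus, groups={"arm": ["shoulder_pan"]}).run()
+    print(calibration)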
diff --git a/src/lerobot/motors/dynamixel/__init__.py b/src/lerobot/motors/dynamixel/__init__.py
new file mode 100644
index 0000000000..425f8538ab
--- /dev/null
+++ b/src/lerobot/motors/dynamixel/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .dynamixel import DriveMode, DynamixelMotorsBus, OperatingMode, TorqueMode
+from .tables import *
diff --git a/src/lerobot/motors/dynamixel/dynamixel.py b/src/lerobot/motors/dynamixel/dynamixel.py
new file mode 100644
index 0000000000..1113ec0f7e
--- /dev/null
+++ b/src/lerobot/motors/dynamixel/dynamixel.py
@@ -0,0 +1,264 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(aliberts): Should we implement FastSyncRead/Write?
+# https://github.com/ROBOTIS-GIT/DynamixelSDK/pull/643
+# https://github.com/ROBOTIS-GIT/DynamixelSDK/releases/tag/3.8.2
+# https://emanual.robotis.com/docs/en/dxl/protocol2/#fast-sync-read-0x8a
+# -> Need to check compatibility across models
+
+import logging
+from copy import deepcopy
+from enum import Enum
+
+from lerobot.utils.encoding_utils import decode_twos_complement, encode_twos_complement
+
+from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address
+from .tables import (
+ AVAILABLE_BAUDRATES,
+ MODEL_BAUDRATE_TABLE,
+ MODEL_CONTROL_TABLE,
+ MODEL_ENCODING_TABLE,
+ MODEL_NUMBER_TABLE,
+ MODEL_RESOLUTION,
+)
+
+PROTOCOL_VERSION = 2.0
+DEFAULT_BAUDRATE = 1_000_000
+DEFAULT_TIMEOUT_MS = 1000
+
+NORMALIZED_DATA = ["Goal_Position", "Present_Position"]
+
+logger = logging.getLogger(__name__)
+
+
+class OperatingMode(Enum):
+ # DYNAMIXEL only controls current(torque) regardless of speed and position. This mode is ideal for a
+ # gripper or a system that only uses current(torque) control or a system that has additional
+ # velocity/position controllers.
+ CURRENT = 0
+
+ # This mode controls velocity. This mode is identical to the Wheel Mode(endless) from existing DYNAMIXEL.
+ # This mode is ideal for wheel-type robots.
+ VELOCITY = 1
+
+ # This mode controls position. This mode is identical to the Joint Mode from existing DYNAMIXEL. Operating
+ # position range is limited by the Max Position Limit(48) and the Min Position Limit(52). This mode is
+ # ideal for articulated robots that each joint rotates less than 360 degrees.
+ POSITION = 3
+
+ # This mode controls position. This mode is identical to the Multi-turn Position Control from existing
+ # DYNAMIXEL. 512 turns are supported(-256[rev] ~ 256[rev]). This mode is ideal for multi-turn wrists or
+ # conveyer systems or a system that requires an additional reduction gear. Note that Max Position
+ # Limit(48), Min Position Limit(52) are not used on Extended Position Control Mode.
+ EXTENDED_POSITION = 4
+
+ # This mode controls both position and current(torque). Up to 512 turns are supported (-256[rev] ~
+ # 256[rev]). This mode is ideal for a system that requires both position and current control such as
+ # articulated robots or grippers.
+ CURRENT_POSITION = 5
+
+ # This mode directly controls PWM output. (Voltage Control Mode)
+ PWM = 16
+
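+# Usage sketch: a gripper joint is typically driven in CURRENT_POSITION so that both its
+# commanded position and its grip force stay bounded (motor name hypothetical), e.g.:
+#   bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)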
+
+class DriveMode(Enum):
+ NON_INVERTED = 0
+ INVERTED = 1
+
+
+class TorqueMode(Enum):
+ ENABLED = 1
+ DISABLED = 0
+
+
+def _split_into_byte_chunks(value: int, length: int) -> list[int]:
+ import dynamixel_sdk as dxl
+
+ if length == 1:
+ data = [value]
+ elif length == 2:
+ data = [dxl.DXL_LOBYTE(value), dxl.DXL_HIBYTE(value)]
+ elif length == 4:
+ data = [
+ dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
+ dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
+ dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)),
+ dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)),
+ ]
+ return data
+
+
+class DynamixelMotorsBus(MotorsBus):
+ """
+ The Dynamixel implementation for a MotorsBus. It relies on the python dynamixel sdk to communicate with
+ the motors. For more info, see the Dynamixel SDK Documentation:
+ https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20
+ """
+
+ apply_drive_mode = False
+ available_baudrates = deepcopy(AVAILABLE_BAUDRATES)
+ default_baudrate = DEFAULT_BAUDRATE
+ default_timeout = DEFAULT_TIMEOUT_MS
+ model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE)
+ model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
+ model_encoding_table = deepcopy(MODEL_ENCODING_TABLE)
+ model_number_table = deepcopy(MODEL_NUMBER_TABLE)
+ model_resolution_table = deepcopy(MODEL_RESOLUTION)
+ normalized_data = deepcopy(NORMALIZED_DATA)
+
+ def __init__(
+ self,
+ port: str,
+ motors: dict[str, Motor],
+ calibration: dict[str, MotorCalibration] | None = None,
+ ):
+ super().__init__(port, motors, calibration)
+ import dynamixel_sdk as dxl
+
+ self.port_handler = dxl.PortHandler(self.port)
+ self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
+ self.sync_reader = dxl.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0)
+ self.sync_writer = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0)
+ self._comm_success = dxl.COMM_SUCCESS
+ self._no_error = 0x00
+
+ def _assert_protocol_is_compatible(self, instruction_name: str) -> None:
+ pass
+
+ def _handshake(self) -> None:
+ self._assert_motors_exist()
+
+ def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+ model = self.motors[motor].model
+ search_baudrates = (
+ [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
+ )
+
+ for baudrate in search_baudrates:
+ self.set_baudrate(baudrate)
+ id_model = self.broadcast_ping()
+ if id_model:
+ found_id, found_model = next(iter(id_model.items()))
+ expected_model_nb = self.model_number_table[model]
+ if found_model != expected_model_nb:
+ raise RuntimeError(
+ f"Found one motor on {baudrate=} with id={found_id} but it has a "
+ f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
+                        f"Make sure you are only connected to the '{motor}' motor (model '{model}')."
+ )
+ return baudrate, found_id
+
+ raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
+
+ def configure_motors(self, return_delay_time=0) -> None:
+ # By default, Dynamixel motors have a 500µs delay response time (corresponding to a value of 250 on
+ # the 'Return_Delay_Time' address). We ensure this is reduced to the minimum of 2µs (value of 0).
+ for motor in self.motors:
+ self.write("Return_Delay_Time", motor, return_delay_time)
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.calibration == self.read_calibration()
+
+ def read_calibration(self) -> dict[str, MotorCalibration]:
+ offsets = self.sync_read("Homing_Offset", normalize=False)
+ mins = self.sync_read("Min_Position_Limit", normalize=False)
+ maxes = self.sync_read("Max_Position_Limit", normalize=False)
+ drive_modes = self.sync_read("Drive_Mode", normalize=False)
+
+ calibration = {}
+ for motor, m in self.motors.items():
+ calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=drive_modes[motor],
+ homing_offset=offsets[motor],
+ range_min=mins[motor],
+ range_max=maxes[motor],
+ )
+
+ return calibration
+
+ def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None:
+ for motor, calibration in calibration_dict.items():
+ self.write("Homing_Offset", motor, calibration.homing_offset)
+ self.write("Min_Position_Limit", motor, calibration.range_min)
+ self.write("Max_Position_Limit", motor, calibration.range_max)
+
+ if cache:
+ self.calibration = calibration_dict
+
+ def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
+ for motor in self._get_motors_list(motors):
+ self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry)
+
+ def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None:
+ addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable")
+ self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry)
+
+ def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
+ for motor in self._get_motors_list(motors):
+ self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry)
+
+ def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
+ for id_ in ids_values:
+ model = self._id_to_model(id_)
+ encoding_table = self.model_encoding_table.get(model)
+ if encoding_table and data_name in encoding_table:
+ n_bytes = encoding_table[data_name]
+ ids_values[id_] = encode_twos_complement(ids_values[id_], n_bytes)
+
+ return ids_values
+
+ def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
+ for id_ in ids_values:
+ model = self._id_to_model(id_)
+ encoding_table = self.model_encoding_table.get(model)
+ if encoding_table and data_name in encoding_table:
+ n_bytes = encoding_table[data_name]
+ ids_values[id_] = decode_twos_complement(ids_values[id_], n_bytes)
+
+ return ids_values
+
+ def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]:
+ """
+ On Dynamixel Motors:
+ Present_Position = Actual_Position + Homing_Offset
+ """
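+        # Example: a 4096-step motor read at raw position 3000 gets a homing offset of
+        # int(4095 / 2) - 3000 = -953, which recenters that reading around half a turn.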
+ half_turn_homings = {}
+ for motor, pos in positions.items():
+ model = self._get_motor_model(motor)
+ max_res = self.model_resolution_table[model] - 1
+ half_turn_homings[motor] = int(max_res / 2) - pos
+
+ return half_turn_homings
+
+ def _split_into_byte_chunks(self, value: int, length: int) -> list[int]:
+ return _split_into_byte_chunks(value, length)
+
+ def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None:
+ for n_try in range(1 + num_retry):
+ data_list, comm = self.packet_handler.broadcastPing(self.port_handler)
+ if self._is_comm_success(comm):
+ break
+ logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})")
+ logger.debug(self.packet_handler.getTxRxResult(comm))
+
+ if not self._is_comm_success(comm):
+ if raise_on_error:
+ raise ConnectionError(self.packet_handler.getTxRxResult(comm))
+
+ return
+
+ return {id_: data[0] for id_, data in data_list.items()}
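+
+
+if __name__ == "__main__":
+    # Connection sketch with a hypothetical port and motor id.
+    from lerobot.motors import Motor, MotorNormMode
+
+    bus = DynamixelMotorsBus(
+        port="/dev/ttyACM0",
+        motors={"wrist_roll": Motor(1, "xl330-m288", MotorNormMode.RANGE_M100_100)},
+    )
+    bus.connect()
+    print(bus.read("Present_Position", "wrist_roll", normalize=False))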
diff --git a/src/lerobot/motors/dynamixel/tables.py b/src/lerobot/motors/dynamixel/tables.py
new file mode 100644
index 0000000000..8b67bbf38e
--- /dev/null
+++ b/src/lerobot/motors/dynamixel/tables.py
@@ -0,0 +1,197 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(Steven): Consider doing the following:
+# from enum import Enum
+# class MyControlTableKey(Enum):
+# ID = "ID"
+# GOAL_SPEED = "Goal_Speed"
+# ...
+#
+# MY_CONTROL_TABLE ={
+# MyControlTableKey.ID.value: (5,1)
+# MyControlTableKey.GOAL_SPEED.value: (46, 2)
+# ...
+# }
+# This allows me to do:
+# bus.write(MyControlTableKey.GOAL_SPEED, ...)
+# Instead of:
+# bus.write("Goal_Speed", ...)
+# This is important for two reasons:
+# 1. The linter will tell me if I'm trying to use an invalid key, instead of me realizing it when I get the RuntimeError
+# 2. We can change the value of the MyControlTableKey enums without impacting the client code
+
+
+# {data_name: (address, size_byte)}
+# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table
+X_SERIES_CONTROL_TABLE = {
+ "Model_Number": (0, 2),
+ "Model_Information": (2, 4),
+ "Firmware_Version": (6, 1),
+ "ID": (7, 1),
+ "Baud_Rate": (8, 1),
+ "Return_Delay_Time": (9, 1),
+ "Drive_Mode": (10, 1),
+ "Operating_Mode": (11, 1),
+ "Secondary_ID": (12, 1),
+ "Protocol_Type": (13, 1),
+ "Homing_Offset": (20, 4),
+ "Moving_Threshold": (24, 4),
+ "Temperature_Limit": (31, 1),
+ "Max_Voltage_Limit": (32, 2),
+ "Min_Voltage_Limit": (34, 2),
+ "PWM_Limit": (36, 2),
+ "Current_Limit": (38, 2),
+ "Acceleration_Limit": (40, 4),
+ "Velocity_Limit": (44, 4),
+ "Max_Position_Limit": (48, 4),
+ "Min_Position_Limit": (52, 4),
+ "Shutdown": (63, 1),
+ "Torque_Enable": (64, 1),
+ "LED": (65, 1),
+ "Status_Return_Level": (68, 1),
+ "Registered_Instruction": (69, 1),
+ "Hardware_Error_Status": (70, 1),
+ "Velocity_I_Gain": (76, 2),
+ "Velocity_P_Gain": (78, 2),
+ "Position_D_Gain": (80, 2),
+ "Position_I_Gain": (82, 2),
+ "Position_P_Gain": (84, 2),
+ "Feedforward_2nd_Gain": (88, 2),
+ "Feedforward_1st_Gain": (90, 2),
+ "Bus_Watchdog": (98, 1),
+ "Goal_PWM": (100, 2),
+ "Goal_Current": (102, 2),
+ "Goal_Velocity": (104, 4),
+ "Profile_Acceleration": (108, 4),
+ "Profile_Velocity": (112, 4),
+ "Goal_Position": (116, 4),
+ "Realtime_Tick": (120, 2),
+ "Moving": (122, 1),
+ "Moving_Status": (123, 1),
+ "Present_PWM": (124, 2),
+ "Present_Current": (126, 2),
+ "Present_Velocity": (128, 4),
+ "Present_Position": (132, 4),
+ "Velocity_Trajectory": (136, 4),
+ "Position_Trajectory": (140, 4),
+ "Present_Input_Voltage": (144, 2),
+ "Present_Temperature": (146, 1),
+}
+
+# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#baud-rate8
+X_SERIES_BAUDRATE_TABLE = {
+ 9_600: 0,
+ 57_600: 1,
+ 115_200: 2,
+ 1_000_000: 3,
+ 2_000_000: 4,
+ 3_000_000: 5,
+ 4_000_000: 6,
+}
+
+# {data_name: size_byte}
+X_SERIES_ENCODINGS_TABLE = {
+ "Homing_Offset": X_SERIES_CONTROL_TABLE["Homing_Offset"][1],
+ "Goal_PWM": X_SERIES_CONTROL_TABLE["Goal_PWM"][1],
+ "Goal_Current": X_SERIES_CONTROL_TABLE["Goal_Current"][1],
+ "Goal_Velocity": X_SERIES_CONTROL_TABLE["Goal_Velocity"][1],
+ "Present_PWM": X_SERIES_CONTROL_TABLE["Present_PWM"][1],
+ "Present_Current": X_SERIES_CONTROL_TABLE["Present_Current"][1],
+ "Present_Velocity": X_SERIES_CONTROL_TABLE["Present_Velocity"][1],
+}
+
+MODEL_ENCODING_TABLE = {
+ "x_series": X_SERIES_ENCODINGS_TABLE,
+ "xl330-m077": X_SERIES_ENCODINGS_TABLE,
+ "xl330-m288": X_SERIES_ENCODINGS_TABLE,
+ "xl430-w250": X_SERIES_ENCODINGS_TABLE,
+ "xm430-w350": X_SERIES_ENCODINGS_TABLE,
+ "xm540-w270": X_SERIES_ENCODINGS_TABLE,
+ "xc430-w150": X_SERIES_ENCODINGS_TABLE,
+}
+
+# {model: model_resolution}
+# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#specifications
+MODEL_RESOLUTION = {
+ "x_series": 4096,
+ "xl330-m077": 4096,
+ "xl330-m288": 4096,
+ "xl430-w250": 4096,
+ "xm430-w350": 4096,
+ "xm540-w270": 4096,
+ "xc430-w150": 4096,
+}
+
+# {model: model_number}
+# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table-of-eeprom-area
+MODEL_NUMBER_TABLE = {
+ "xl330-m077": 1190,
+ "xl330-m288": 1200,
+ "xl430-w250": 1060,
+ "xm430-w350": 1020,
+ "xm540-w270": 1120,
+ "xc430-w150": 1070,
+}
+
+# {model: available_operating_modes}
+# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#operating-mode11
+MODEL_OPERATING_MODES = {
+ "xl330-m077": [0, 1, 3, 4, 5, 16],
+ "xl330-m288": [0, 1, 3, 4, 5, 16],
+ "xl430-w250": [1, 3, 4, 16],
+ "xm430-w350": [0, 1, 3, 4, 5, 16],
+ "xm540-w270": [0, 1, 3, 4, 5, 16],
+ "xc430-w150": [1, 3, 4, 16],
+}
+
+MODEL_CONTROL_TABLE = {
+ "x_series": X_SERIES_CONTROL_TABLE,
+ "xl330-m077": X_SERIES_CONTROL_TABLE,
+ "xl330-m288": X_SERIES_CONTROL_TABLE,
+ "xl430-w250": X_SERIES_CONTROL_TABLE,
+ "xm430-w350": X_SERIES_CONTROL_TABLE,
+ "xm540-w270": X_SERIES_CONTROL_TABLE,
+ "xc430-w150": X_SERIES_CONTROL_TABLE,
+}
+
+MODEL_BAUDRATE_TABLE = {
+ "x_series": X_SERIES_BAUDRATE_TABLE,
+ "xl330-m077": X_SERIES_BAUDRATE_TABLE,
+ "xl330-m288": X_SERIES_BAUDRATE_TABLE,
+ "xl430-w250": X_SERIES_BAUDRATE_TABLE,
+ "xm430-w350": X_SERIES_BAUDRATE_TABLE,
+ "xm540-w270": X_SERIES_BAUDRATE_TABLE,
+ "xc430-w150": X_SERIES_BAUDRATE_TABLE,
+}
+
+AVAILABLE_BAUDRATES = [
+ 9_600,
+ 19_200,
+ 38_400,
+ 57_600,
+ 115_200,
+ 230_400,
+ 460_800,
+ 500_000,
+ 576_000,
+ 921_600,
+ 1_000_000,
+ 1_152_000,
+ 2_000_000,
+ 2_500_000,
+ 3_000_000,
+ 3_500_000,
+ 4_000_000,
+]
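+
+
+if __name__ == "__main__":
+    # Sanity-check sketch: all X-series models listed above share the same layout,
+    # e.g. Goal_Position is a 4-byte register at address 116.
+    addr, n_bytes = MODEL_CONTROL_TABLE["xm430-w350"]["Goal_Position"]
+    print(f"Goal_Position -> address={addr}, size={n_bytes} bytes")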
diff --git a/src/lerobot/motors/feetech/__init__.py b/src/lerobot/motors/feetech/__init__.py
new file mode 100644
index 0000000000..75da2d2212
--- /dev/null
+++ b/src/lerobot/motors/feetech/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .feetech import DriveMode, FeetechMotorsBus, OperatingMode, TorqueMode
+from .tables import *
diff --git a/src/lerobot/motors/feetech/feetech.py b/src/lerobot/motors/feetech/feetech.py
new file mode 100644
index 0000000000..88d45ba394
--- /dev/null
+++ b/src/lerobot/motors/feetech/feetech.py
@@ -0,0 +1,455 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from copy import deepcopy
+from enum import Enum
+from pprint import pformat
+
+from lerobot.utils.encoding_utils import decode_sign_magnitude, encode_sign_magnitude
+
+from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address
+from .tables import (
+ FIRMWARE_MAJOR_VERSION,
+ FIRMWARE_MINOR_VERSION,
+ MODEL_BAUDRATE_TABLE,
+ MODEL_CONTROL_TABLE,
+ MODEL_ENCODING_TABLE,
+ MODEL_NUMBER,
+ MODEL_NUMBER_TABLE,
+ MODEL_PROTOCOL,
+ MODEL_RESOLUTION,
+ SCAN_BAUDRATES,
+)
+
+DEFAULT_PROTOCOL_VERSION = 0
+DEFAULT_BAUDRATE = 1_000_000
+DEFAULT_TIMEOUT_MS = 1000
+
+NORMALIZED_DATA = ["Goal_Position", "Present_Position"]
+
+logger = logging.getLogger(__name__)
+
+
+class OperatingMode(Enum):
+    # Position servo mode
+    POSITION = 0
+    # Constant speed mode, controlled by parameter 0x2e; the highest bit (15) is the
+    # direction bit
+    VELOCITY = 1
+    # PWM open-loop speed regulation mode; running time is controlled by parameter 0x2c,
+    # with bit 11 as the direction bit
+    PWM = 2
+    # Step servo mode: the number of steps to run is given by parameter 0x2a, with the
+    # highest bit (15) as the direction bit
+    STEP = 3
+
+
+class DriveMode(Enum):
+ NON_INVERTED = 0
+ INVERTED = 1
+
+
+class TorqueMode(Enum):
+ ENABLED = 1
+ DISABLED = 0
+
+
+def _split_into_byte_chunks(value: int, length: int) -> list[int]:
+ import scservo_sdk as scs
+
+ if length == 1:
+ data = [value]
+ elif length == 2:
+ data = [scs.SCS_LOBYTE(value), scs.SCS_HIBYTE(value)]
+ elif length == 4:
+ data = [
+ scs.SCS_LOBYTE(scs.SCS_LOWORD(value)),
+ scs.SCS_HIBYTE(scs.SCS_LOWORD(value)),
+ scs.SCS_LOBYTE(scs.SCS_HIWORD(value)),
+ scs.SCS_HIBYTE(scs.SCS_HIWORD(value)),
+ ]
+ return data
+
+
+def patch_setPacketTimeout(self, packet_length): # noqa: N802
+ """
+ HACK: This patches the PortHandler behavior to set the correct packet timeouts.
+
+ It fixes https://gitee.com/ftservo/SCServoSDK/issues/IBY2S6
+ The bug is fixed on the official Feetech SDK repo (https://gitee.com/ftservo/FTServo_Python)
+    but because that version is not published on PyPI, we rely on the (unofficial) one that is, which needs
+ patching.
+ """
+ self.packet_start_time = self.getCurrentTime()
+ self.packet_timeout = (self.tx_time_per_byte * packet_length) + (self.tx_time_per_byte * 3.0) + 50
+
+
+class FeetechMotorsBus(MotorsBus):
+ """
+    The FeetechMotorsBus class provides efficient reads from and writes to the attached motors. It relies
+    on the python feetech sdk (scservo_sdk) to communicate with the motors, which is itself based on the
+    dynamixel sdk.
+ """
+
+ apply_drive_mode = True
+ available_baudrates = deepcopy(SCAN_BAUDRATES)
+ default_baudrate = DEFAULT_BAUDRATE
+ default_timeout = DEFAULT_TIMEOUT_MS
+ model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE)
+ model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
+ model_encoding_table = deepcopy(MODEL_ENCODING_TABLE)
+ model_number_table = deepcopy(MODEL_NUMBER_TABLE)
+ model_resolution_table = deepcopy(MODEL_RESOLUTION)
+ normalized_data = deepcopy(NORMALIZED_DATA)
+
+ def __init__(
+ self,
+ port: str,
+ motors: dict[str, Motor],
+ calibration: dict[str, MotorCalibration] | None = None,
+ protocol_version: int = DEFAULT_PROTOCOL_VERSION,
+ ):
+ super().__init__(port, motors, calibration)
+ self.protocol_version = protocol_version
+ self._assert_same_protocol()
+ import scservo_sdk as scs
+
+ self.port_handler = scs.PortHandler(self.port)
+ # HACK: monkeypatch
+ self.port_handler.setPacketTimeout = patch_setPacketTimeout.__get__(
+ self.port_handler, scs.PortHandler
+ )
+ self.packet_handler = scs.PacketHandler(protocol_version)
+ self.sync_reader = scs.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0)
+ self.sync_writer = scs.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0)
+ self._comm_success = scs.COMM_SUCCESS
+ self._no_error = 0x00
+
+ if any(MODEL_PROTOCOL[model] != self.protocol_version for model in self.models):
+ raise ValueError(f"Some motors are incompatible with protocol_version={self.protocol_version}")
+
+ def _assert_same_protocol(self) -> None:
+ if any(MODEL_PROTOCOL[model] != self.protocol_version for model in self.models):
+ raise RuntimeError("Some motors use an incompatible protocol.")
+
+ def _assert_protocol_is_compatible(self, instruction_name: str) -> None:
+ if instruction_name == "sync_read" and self.protocol_version == 1:
+ raise NotImplementedError(
+ "'Sync Read' is not available with Feetech motors using Protocol 1. Use 'Read' sequentially instead."
+ )
+ if instruction_name == "broadcast_ping" and self.protocol_version == 1:
+ raise NotImplementedError(
+ "'Broadcast Ping' is not available with Feetech motors using Protocol 1. Use 'Ping' sequentially instead."
+ )
+
+ def _assert_same_firmware(self) -> None:
+ firmware_versions = self._read_firmware_version(self.ids, raise_on_error=True)
+ if len(set(firmware_versions.values())) != 1:
+ raise RuntimeError(
+ "Some Motors use different firmware versions:"
+ f"\n{pformat(firmware_versions)}\n"
+ "Update their firmware first using Feetech's software. "
+ "Visit https://www.feetechrc.com/software."
+ )
+
+ def _handshake(self) -> None:
+ self._assert_motors_exist()
+ self._assert_same_firmware()
+
+ def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+ if self.protocol_version == 0:
+ return self._find_single_motor_p0(motor, initial_baudrate)
+ else:
+ return self._find_single_motor_p1(motor, initial_baudrate)
+
+ def _find_single_motor_p0(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+ model = self.motors[motor].model
+ search_baudrates = (
+ [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
+ )
+ expected_model_nb = self.model_number_table[model]
+
+ for baudrate in search_baudrates:
+ self.set_baudrate(baudrate)
+ id_model = self.broadcast_ping()
+ if id_model:
+ found_id, found_model = next(iter(id_model.items()))
+ if found_model != expected_model_nb:
+ raise RuntimeError(
+ f"Found one motor on {baudrate=} with id={found_id} but it has a "
+ f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
+                        f"Make sure you are only connected to the '{motor}' motor (model '{model}')."
+ )
+ return baudrate, found_id
+
+ raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
+
+ def _find_single_motor_p1(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+ import scservo_sdk as scs
+
+ model = self.motors[motor].model
+ search_baudrates = (
+ [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
+ )
+ expected_model_nb = self.model_number_table[model]
+
+ for baudrate in search_baudrates:
+ self.set_baudrate(baudrate)
+ for id_ in range(scs.MAX_ID + 1):
+ found_model = self.ping(id_)
+ if found_model is not None:
+ if found_model != expected_model_nb:
+ raise RuntimeError(
+ f"Found one motor on {baudrate=} with id={id_} but it has a "
+ f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
+                            f"Make sure you are only connected to the '{motor}' motor (model '{model}')."
+ )
+ return baudrate, id_
+
+ raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
+
+ def configure_motors(self, return_delay_time=0, maximum_acceleration=254, acceleration=254) -> None:
+ for motor in self.motors:
+ # By default, Feetech motors have a 500µs delay response time (corresponding to a value of 250 on
+ # the 'Return_Delay_Time' address). We ensure this is reduced to the minimum of 2µs (value of 0).
+ self.write("Return_Delay_Time", motor, return_delay_time)
+            # Set 'Maximum_Acceleration' to 254 to speed up acceleration and deceleration of the motors.
+ if self.protocol_version == 0:
+ self.write("Maximum_Acceleration", motor, maximum_acceleration)
+ self.write("Acceleration", motor, acceleration)
+
+ @property
+ def is_calibrated(self) -> bool:
+ motors_calibration = self.read_calibration()
+ if set(motors_calibration) != set(self.calibration):
+ return False
+
+ same_ranges = all(
+ self.calibration[motor].range_min == cal.range_min
+ and self.calibration[motor].range_max == cal.range_max
+ for motor, cal in motors_calibration.items()
+ )
+ if self.protocol_version == 1:
+ return same_ranges
+
+ same_offsets = all(
+ self.calibration[motor].homing_offset == cal.homing_offset
+ for motor, cal in motors_calibration.items()
+ )
+ return same_ranges and same_offsets
+
+ def read_calibration(self) -> dict[str, MotorCalibration]:
+ offsets, mins, maxes = {}, {}, {}
+ for motor in self.motors:
+ mins[motor] = self.read("Min_Position_Limit", motor, normalize=False)
+ maxes[motor] = self.read("Max_Position_Limit", motor, normalize=False)
+ offsets[motor] = (
+ self.read("Homing_Offset", motor, normalize=False) if self.protocol_version == 0 else 0
+ )
+
+ calibration = {}
+ for motor, m in self.motors.items():
+ calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=0,
+ homing_offset=offsets[motor],
+ range_min=mins[motor],
+ range_max=maxes[motor],
+ )
+
+ return calibration
+
+ def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None:
+ for motor, calibration in calibration_dict.items():
+ if self.protocol_version == 0:
+ self.write("Homing_Offset", motor, calibration.homing_offset)
+ self.write("Min_Position_Limit", motor, calibration.range_min)
+ self.write("Max_Position_Limit", motor, calibration.range_max)
+
+ if cache:
+ self.calibration = calibration_dict
+
+ def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]:
+ """
+ On Feetech Motors:
+ Present_Position = Actual_Position - Homing_Offset
+ """
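+        # Example: a 4096-step motor read at raw position 3000 gets a homing offset of
+        # 3000 - int(4095 / 2) = 953 (the sign convention is the opposite of Dynamixel's).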
+ half_turn_homings = {}
+ for motor, pos in positions.items():
+ model = self._get_motor_model(motor)
+ max_res = self.model_resolution_table[model] - 1
+ half_turn_homings[motor] = pos - int(max_res / 2)
+
+ return half_turn_homings
+
+ def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
+ for motor in self._get_motors_list(motors):
+ self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry)
+ self.write("Lock", motor, 0, num_retry=num_retry)
+
+ def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None:
+ addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable")
+ self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry)
+ addr, length = get_address(self.model_ctrl_table, model, "Lock")
+ self._write(addr, length, motor_id, 0, num_retry=num_retry)
+
+ def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
+ for motor in self._get_motors_list(motors):
+ self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry)
+ self.write("Lock", motor, 1, num_retry=num_retry)
+
+ def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
+ for id_ in ids_values:
+ model = self._id_to_model(id_)
+ encoding_table = self.model_encoding_table.get(model)
+ if encoding_table and data_name in encoding_table:
+ sign_bit = encoding_table[data_name]
+ ids_values[id_] = encode_sign_magnitude(ids_values[id_], sign_bit)
+
+ return ids_values
+
+ def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
+ for id_ in ids_values:
+ model = self._id_to_model(id_)
+ encoding_table = self.model_encoding_table.get(model)
+ if encoding_table and data_name in encoding_table:
+ sign_bit = encoding_table[data_name]
+ ids_values[id_] = decode_sign_magnitude(ids_values[id_], sign_bit)
+
+ return ids_values
+
+ def _split_into_byte_chunks(self, value: int, length: int) -> list[int]:
+ return _split_into_byte_chunks(value, length)
+
+ def _broadcast_ping(self) -> tuple[dict[int, int], int]:
+ import scservo_sdk as scs
+
+ data_list = {}
+
+ status_length = 6
+
+ rx_length = 0
+ wait_length = status_length * scs.MAX_ID
+
+ txpacket = [0] * 6
+
+ tx_time_per_byte = (1000.0 / self.port_handler.getBaudRate()) * 10.0
+
+ txpacket[scs.PKT_ID] = scs.BROADCAST_ID
+ txpacket[scs.PKT_LENGTH] = 2
+ txpacket[scs.PKT_INSTRUCTION] = scs.INST_PING
+
+ result = self.packet_handler.txPacket(self.port_handler, txpacket)
+ if result != scs.COMM_SUCCESS:
+ self.port_handler.is_using = False
+ return data_list, result
+
+ # set rx timeout
+ self.port_handler.setPacketTimeoutMillis((wait_length * tx_time_per_byte) + (3.0 * scs.MAX_ID) + 16.0)
+
+ rxpacket = []
+ while not self.port_handler.isPacketTimeout() and rx_length < wait_length:
+ rxpacket += self.port_handler.readPort(wait_length - rx_length)
+ rx_length = len(rxpacket)
+
+ self.port_handler.is_using = False
+
+ if rx_length == 0:
+ return data_list, scs.COMM_RX_TIMEOUT
+
+ while True:
+ if rx_length < status_length:
+ return data_list, scs.COMM_RX_CORRUPT
+
+ # find packet header
+ for idx in range(0, (rx_length - 1)):
+ if (rxpacket[idx] == 0xFF) and (rxpacket[idx + 1] == 0xFF):
+ break
+
+ if idx == 0: # found at the beginning of the packet
+ # calculate checksum
+ checksum = 0
+ for idx in range(2, status_length - 1): # except header & checksum
+ checksum += rxpacket[idx]
+
+ checksum = ~checksum & 0xFF
+ if rxpacket[status_length - 1] == checksum:
+ result = scs.COMM_SUCCESS
+ data_list[rxpacket[scs.PKT_ID]] = rxpacket[scs.PKT_ERROR]
+
+ del rxpacket[0:status_length]
+ rx_length = rx_length - status_length
+
+ if rx_length == 0:
+ return data_list, result
+ else:
+ result = scs.COMM_RX_CORRUPT
+ # remove header (0xFF 0xFF)
+ del rxpacket[0:2]
+ rx_length = rx_length - 2
+ else:
+ # remove unnecessary packets
+ del rxpacket[0:idx]
+ rx_length = rx_length - idx
+
+ def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None:
+ self._assert_protocol_is_compatible("broadcast_ping")
+ for n_try in range(1 + num_retry):
+ ids_status, comm = self._broadcast_ping()
+ if self._is_comm_success(comm):
+ break
+ logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})")
+ logger.debug(self.packet_handler.getTxRxResult(comm))
+
+ if not self._is_comm_success(comm):
+ if raise_on_error:
+ raise ConnectionError(self.packet_handler.getTxRxResult(comm))
+ return
+
+ ids_errors = {id_: status for id_, status in ids_status.items() if self._is_error(status)}
+ if ids_errors:
+ display_dict = {id_: self.packet_handler.getRxPacketError(err) for id_, err in ids_errors.items()}
+ logger.error(f"Some motors found returned an error status:\n{pformat(display_dict, indent=4)}")
+
+ return self._read_model_number(list(ids_status), raise_on_error)
+
+ def _read_firmware_version(self, motor_ids: list[int], raise_on_error: bool = False) -> dict[int, str]:
+ firmware_versions = {}
+ for id_ in motor_ids:
+ firm_ver_major, comm, error = self._read(
+ *FIRMWARE_MAJOR_VERSION, id_, raise_on_error=raise_on_error
+ )
+ if not self._is_comm_success(comm) or self._is_error(error):
+ continue
+
+ firm_ver_minor, comm, error = self._read(
+ *FIRMWARE_MINOR_VERSION, id_, raise_on_error=raise_on_error
+ )
+ if not self._is_comm_success(comm) or self._is_error(error):
+ continue
+
+ firmware_versions[id_] = f"{firm_ver_major}.{firm_ver_minor}"
+
+ return firmware_versions
+
+ def _read_model_number(self, motor_ids: list[int], raise_on_error: bool = False) -> dict[int, int]:
+ model_numbers = {}
+ for id_ in motor_ids:
+ model_nb, comm, error = self._read(*MODEL_NUMBER, id_, raise_on_error=raise_on_error)
+ if not self._is_comm_success(comm) or self._is_error(error):
+ continue
+
+ model_numbers[id_] = model_nb
+
+ return model_numbers
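+
+
+if __name__ == "__main__":
+    # Connection sketch with a hypothetical port and motor id.
+    from lerobot.motors import Motor, MotorNormMode
+
+    bus = FeetechMotorsBus(
+        port="/dev/ttyUSB0",
+        motors={"gripper": Motor(1, "sts3215", MotorNormMode.RANGE_0_100)},
+    )
+    bus.connect()
+    # {id: model_number} for every motor answering on the bus (protocol 0 only).
+    print(bus.broadcast_ping())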
diff --git a/src/lerobot/motors/feetech/tables.py b/src/lerobot/motors/feetech/tables.py
new file mode 100644
index 0000000000..48814957f2
--- /dev/null
+++ b/src/lerobot/motors/feetech/tables.py
@@ -0,0 +1,252 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FIRMWARE_MAJOR_VERSION = (0, 1)
+FIRMWARE_MINOR_VERSION = (1, 1)
+MODEL_NUMBER = (3, 2)
+
+# TODO(Steven): Consider doing the following:
+# from enum import Enum
+# class MyControlTableKey(Enum):
+# ID = "ID"
+# GOAL_SPEED = "Goal_Speed"
+# ...
+#
+# MY_CONTROL_TABLE ={
+# MyControlTableKey.ID.value: (5,1)
+# MyControlTableKey.GOAL_SPEED.value: (46, 2)
+# ...
+# }
+# This allows me to do:
+# bus.write(MyControlTableKey.GOAL_SPEED, ...)
+# Instead of:
+# bus.write("Goal_Speed", ...)
+# This is important for two reasons:
+# 1. The linter will tell me if I'm trying to use an invalid key, instead of me realizing it when I get the RuntimeError
+# 2. We can change the value of the MyControlTableKey enums without impacting the client code
+
+# data_name: (address, size_byte)
+# http://doc.feetech.cn/#/prodinfodownload?srcType=FT-SMS-STS-emanual-229f4476422d4059abfb1cb0
+STS_SMS_SERIES_CONTROL_TABLE = {
+ # EPROM
+ "Firmware_Major_Version": FIRMWARE_MAJOR_VERSION, # read-only
+ "Firmware_Minor_Version": FIRMWARE_MINOR_VERSION, # read-only
+ "Model_Number": MODEL_NUMBER, # read-only
+ "ID": (5, 1),
+ "Baud_Rate": (6, 1),
+ "Return_Delay_Time": (7, 1),
+ "Response_Status_Level": (8, 1),
+ "Min_Position_Limit": (9, 2),
+ "Max_Position_Limit": (11, 2),
+ "Max_Temperature_Limit": (13, 1),
+ "Max_Voltage_Limit": (14, 1),
+ "Min_Voltage_Limit": (15, 1),
+ "Max_Torque_Limit": (16, 2),
+ "Phase": (18, 1),
+ "Unloading_Condition": (19, 1),
+ "LED_Alarm_Condition": (20, 1),
+ "P_Coefficient": (21, 1),
+ "D_Coefficient": (22, 1),
+ "I_Coefficient": (23, 1),
+ "Minimum_Startup_Force": (24, 2),
+ "CW_Dead_Zone": (26, 1),
+ "CCW_Dead_Zone": (27, 1),
+ "Protection_Current": (28, 2),
+ "Angular_Resolution": (30, 1),
+ "Homing_Offset": (31, 2),
+ "Operating_Mode": (33, 1),
+ "Protective_Torque": (34, 1),
+ "Protection_Time": (35, 1),
+ "Overload_Torque": (36, 1),
+ "Velocity_closed_loop_P_proportional_coefficient": (37, 1),
+ "Over_Current_Protection_Time": (38, 1),
+ "Velocity_closed_loop_I_integral_coefficient": (39, 1),
+ # SRAM
+ "Torque_Enable": (40, 1),
+ "Acceleration": (41, 1),
+ "Goal_Position": (42, 2),
+ "Goal_Time": (44, 2),
+ "Goal_Velocity": (46, 2),
+ "Torque_Limit": (48, 2),
+ "Lock": (55, 1),
+ "Present_Position": (56, 2), # read-only
+ "Present_Velocity": (58, 2), # read-only
+ "Present_Load": (60, 2), # read-only
+ "Present_Voltage": (62, 1), # read-only
+ "Present_Temperature": (63, 1), # read-only
+ "Status": (65, 1), # read-only
+ "Moving": (66, 1), # read-only
+ "Present_Current": (69, 2), # read-only
+ "Goal_Position_2": (71, 2), # read-only
+ # Factory
+ "Moving_Velocity": (80, 1),
+ "Moving_Velocity_Threshold": (80, 1),
+ "DTs": (81, 1), # (ms)
+ "Velocity_Unit_factor": (82, 1),
+ "Hts": (83, 1), # (ns) valid for firmware >= 2.54, other versions keep 0
+ "Maximum_Velocity_Limit": (84, 1),
+ "Maximum_Acceleration": (85, 1),
+ "Acceleration_Multiplier ": (86, 1), # Acceleration multiplier in effect when acceleration is 0
+}
+
+# http://doc.feetech.cn/#/prodinfodownload?srcType=FT-SCSCL-emanual-cbcc8ab2e3384282a01d4bf3
+SCS_SERIES_CONTROL_TABLE = {
+ # EPROM
+ "Firmware_Major_Version": FIRMWARE_MAJOR_VERSION, # read-only
+ "Firmware_Minor_Version": FIRMWARE_MINOR_VERSION, # read-only
+ "Model_Number": MODEL_NUMBER, # read-only
+ "ID": (5, 1),
+ "Baud_Rate": (6, 1),
+ "Return_Delay_Time": (7, 1),
+ "Response_Status_Level": (8, 1),
+ "Min_Position_Limit": (9, 2),
+ "Max_Position_Limit": (11, 2),
+ "Max_Temperature_Limit": (13, 1),
+ "Max_Voltage_Limit": (14, 1),
+ "Min_Voltage_Limit": (15, 1),
+ "Max_Torque_Limit": (16, 2),
+ "Phase": (18, 1),
+ "Unloading_Condition": (19, 1),
+ "LED_Alarm_Condition": (20, 1),
+ "P_Coefficient": (21, 1),
+ "D_Coefficient": (22, 1),
+ "I_Coefficient": (23, 1),
+ "Minimum_Startup_Force": (24, 2),
+ "CW_Dead_Zone": (26, 1),
+ "CCW_Dead_Zone": (27, 1),
+ "Protective_Torque": (37, 1),
+ "Protection_Time": (38, 1),
+ # SRAM
+ "Torque_Enable": (40, 1),
+ "Acceleration": (41, 1),
+ "Goal_Position": (42, 2),
+ "Running_Time": (44, 2),
+ "Goal_Velocity": (46, 2),
+ "Lock": (48, 1),
+ "Present_Position": (56, 2), # read-only
+ "Present_Velocity": (58, 2), # read-only
+ "Present_Load": (60, 2), # read-only
+ "Present_Voltage": (62, 1), # read-only
+ "Present_Temperature": (63, 1), # read-only
+ "Sync_Write_Flag": (64, 1), # read-only
+ "Status": (65, 1), # read-only
+ "Moving": (66, 1), # read-only
+ # Factory
+ "PWM_Maximum_Step": (78, 1),
+ "Moving_Velocity_Threshold*50": (79, 1),
+ "DTs": (80, 1), # (ms)
+ "Minimum_Velocity_Limit*50": (81, 1),
+ "Maximum_Velocity_Limit*50": (82, 1),
+ "Acceleration_2": (83, 1), # don't know what that is
+}
+
+STS_SMS_SERIES_BAUDRATE_TABLE = {
+ 1_000_000: 0,
+ 500_000: 1,
+ 250_000: 2,
+ 128_000: 3,
+ 115_200: 4,
+ 57_600: 5,
+ 38_400: 6,
+ 19_200: 7,
+}
+
+SCS_SERIES_BAUDRATE_TABLE = {
+ 1_000_000: 0,
+ 500_000: 1,
+ 250_000: 2,
+ 128_000: 3,
+ 115_200: 4,
+ 57_600: 5,
+ 38_400: 6,
+ 19_200: 7,
+}
+
+MODEL_CONTROL_TABLE = {
+ "sts_series": STS_SMS_SERIES_CONTROL_TABLE,
+ "scs_series": SCS_SERIES_CONTROL_TABLE,
+ "sms_series": STS_SMS_SERIES_CONTROL_TABLE,
+ "sts3215": STS_SMS_SERIES_CONTROL_TABLE,
+ "sts3250": STS_SMS_SERIES_CONTROL_TABLE,
+ "scs0009": SCS_SERIES_CONTROL_TABLE,
+ "sm8512bl": STS_SMS_SERIES_CONTROL_TABLE,
+}
+
+MODEL_RESOLUTION = {
+ "sts_series": 4096,
+ "sms_series": 4096,
+ "scs_series": 1024,
+ "sts3215": 4096,
+ "sts3250": 4096,
+ "sm8512bl": 4096,
+ "scs0009": 1024,
+}
+
+MODEL_BAUDRATE_TABLE = {
+ "sts_series": STS_SMS_SERIES_BAUDRATE_TABLE,
+ "sms_series": STS_SMS_SERIES_BAUDRATE_TABLE,
+ "scs_series": SCS_SERIES_BAUDRATE_TABLE,
+ "sm8512bl": STS_SMS_SERIES_BAUDRATE_TABLE,
+ "sts3215": STS_SMS_SERIES_BAUDRATE_TABLE,
+ "sts3250": STS_SMS_SERIES_BAUDRATE_TABLE,
+ "scs0009": SCS_SERIES_BAUDRATE_TABLE,
+}
+
+# Sign-Magnitude encoding bits
+STS_SMS_SERIES_ENCODINGS_TABLE = {
+ "Homing_Offset": 11,
+ "Goal_Velocity": 15,
+ "Present_Velocity": 15,
+}
+
+MODEL_ENCODING_TABLE = {
+ "sts_series": STS_SMS_SERIES_ENCODINGS_TABLE,
+ "sms_series": STS_SMS_SERIES_ENCODINGS_TABLE,
+ "scs_series": {},
+ "sts3215": STS_SMS_SERIES_ENCODINGS_TABLE,
+ "sts3250": STS_SMS_SERIES_ENCODINGS_TABLE,
+ "sm8512bl": STS_SMS_SERIES_ENCODINGS_TABLE,
+ "scs0009": {},
+}
+
+SCAN_BAUDRATES = [
+ 4_800,
+ 9_600,
+ 14_400,
+ 19_200,
+ 38_400,
+ 57_600,
+ 115_200,
+ 128_000,
+ 250_000,
+ 500_000,
+ 1_000_000,
+]
+
+MODEL_NUMBER_TABLE = {
+ "sts3215": 777,
+ "sts3250": 2825,
+ "sm8512bl": 11272,
+ "scs0009": 1284,
+}
+
+MODEL_PROTOCOL = {
+ "sts_series": 0,
+ "sms_series": 0,
+ "scs_series": 1,
+ "sts3215": 0,
+ "sts3250": 0,
+ "sm8512bl": 0,
+ "scs0009": 1,
+}
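+
+
+if __name__ == "__main__":
+    # Sketch: signed STS/SMS registers use sign-magnitude encoding; e.g. the sts3215
+    # keeps the Homing_Offset sign in bit 11 and speaks protocol version 0.
+    print(MODEL_ENCODING_TABLE["sts3215"]["Homing_Offset"], MODEL_PROTOCOL["sts3215"])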
diff --git a/src/lerobot/motors/motors_bus.py b/src/lerobot/motors/motors_bus.py
new file mode 100644
index 0000000000..597bcd3c4d
--- /dev/null
+++ b/src/lerobot/motors/motors_bus.py
@@ -0,0 +1,1220 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ruff: noqa: N802
+# This noqa is for the Protocol classes: PortHandler, PacketHandler, GroupSyncRead/Write
+# TODO(aliberts): Add block noqa when feature below is available
+# https://github.com/astral-sh/ruff/issues/3711
+
+import abc
+import logging
+from contextlib import contextmanager
+from dataclasses import dataclass
+from enum import Enum
+from functools import cached_property
+from pprint import pformat
+from typing import Protocol, TypeAlias
+
+import serial
+from deepdiff import DeepDiff
+from tqdm import tqdm
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.utils.utils import enter_pressed, move_cursor_up
+
+NameOrID: TypeAlias = str | int
+Value: TypeAlias = int | float
+
+logger = logging.getLogger(__name__)
+
+
+def get_ctrl_table(model_ctrl_table: dict[str, dict], model: str) -> dict[str, tuple[int, int]]:
+ ctrl_table = model_ctrl_table.get(model)
+ if ctrl_table is None:
+ raise KeyError(f"Control table for {model=} not found.")
+ return ctrl_table
+
+
+def get_address(model_ctrl_table: dict[str, dict], model: str, data_name: str) -> tuple[int, int]:
+ ctrl_table = get_ctrl_table(model_ctrl_table, model)
+ addr_bytes = ctrl_table.get(data_name)
+ if addr_bytes is None:
+ raise KeyError(f"Address for '{data_name}' not found in {model} control table.")
+ return addr_bytes
+
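+# e.g. with the Dynamixel X-series table, get_address(MODEL_CONTROL_TABLE, "xm430-w350",
+# "Present_Position") returns (132, 4): a 4-byte register at address 132.
+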
+
+def assert_same_address(model_ctrl_table: dict[str, dict], motor_models: list[str], data_name: str) -> None:
+ all_addr = []
+ all_bytes = []
+ for model in motor_models:
+        addr, n_bytes = get_address(model_ctrl_table, model, data_name)
+        all_addr.append(addr)
+        all_bytes.append(n_bytes)
+
+ if len(set(all_addr)) != 1:
+ raise NotImplementedError(
+ f"At least two motor models use a different address for `data_name`='{data_name}'"
+ f"({list(zip(motor_models, all_addr, strict=False))})."
+ )
+
+ if len(set(all_bytes)) != 1:
+ raise NotImplementedError(
+ f"At least two motor models use a different bytes representation for `data_name`='{data_name}'"
+ f"({list(zip(motor_models, all_bytes, strict=False))})."
+ )
+
+
+class MotorNormMode(str, Enum):
+ RANGE_0_100 = "range_0_100"
+ RANGE_M100_100 = "range_m100_100"
+ DEGREES = "degrees"
+
+
+@dataclass
+class MotorCalibration:
+ id: int
+ drive_mode: int
+ homing_offset: int
+ range_min: int
+ range_max: int
+
+
+@dataclass
+class Motor:
+ id: int
+ model: str
+ norm_mode: MotorNormMode
+
+
+class JointOutOfRangeError(Exception):
+ def __init__(self, message="Joint is out of range"):
+ self.message = message
+ super().__init__(self.message)
+
+
+class PortHandler(Protocol):
+ def __init__(self, port_name):
+ self.is_open: bool
+ self.baudrate: int
+ self.packet_start_time: float
+ self.packet_timeout: float
+ self.tx_time_per_byte: float
+ self.is_using: bool
+ self.port_name: str
+ self.ser: serial.Serial
+
+ def openPort(self): ...
+ def closePort(self): ...
+ def clearPort(self): ...
+ def setPortName(self, port_name): ...
+ def getPortName(self): ...
+ def setBaudRate(self, baudrate): ...
+ def getBaudRate(self): ...
+ def getBytesAvailable(self): ...
+ def readPort(self, length): ...
+ def writePort(self, packet): ...
+ def setPacketTimeout(self, packet_length): ...
+ def setPacketTimeoutMillis(self, msec): ...
+ def isPacketTimeout(self): ...
+ def getCurrentTime(self): ...
+ def getTimeSinceStart(self): ...
+ def setupPort(self, cflag_baud): ...
+ def getCFlagBaud(self, baudrate): ...
+
+
+class PacketHandler(Protocol):
+ def getTxRxResult(self, result): ...
+ def getRxPacketError(self, error): ...
+ def txPacket(self, port, txpacket): ...
+ def rxPacket(self, port): ...
+ def txRxPacket(self, port, txpacket): ...
+ def ping(self, port, id): ...
+ def action(self, port, id): ...
+ def readTx(self, port, id, address, length): ...
+ def readRx(self, port, id, length): ...
+ def readTxRx(self, port, id, address, length): ...
+ def read1ByteTx(self, port, id, address): ...
+ def read1ByteRx(self, port, id): ...
+ def read1ByteTxRx(self, port, id, address): ...
+ def read2ByteTx(self, port, id, address): ...
+ def read2ByteRx(self, port, id): ...
+ def read2ByteTxRx(self, port, id, address): ...
+ def read4ByteTx(self, port, id, address): ...
+ def read4ByteRx(self, port, id): ...
+ def read4ByteTxRx(self, port, id, address): ...
+ def writeTxOnly(self, port, id, address, length, data): ...
+ def writeTxRx(self, port, id, address, length, data): ...
+ def write1ByteTxOnly(self, port, id, address, data): ...
+ def write1ByteTxRx(self, port, id, address, data): ...
+ def write2ByteTxOnly(self, port, id, address, data): ...
+ def write2ByteTxRx(self, port, id, address, data): ...
+ def write4ByteTxOnly(self, port, id, address, data): ...
+ def write4ByteTxRx(self, port, id, address, data): ...
+ def regWriteTxOnly(self, port, id, address, length, data): ...
+ def regWriteTxRx(self, port, id, address, length, data): ...
+ def syncReadTx(self, port, start_address, data_length, param, param_length): ...
+ def syncWriteTxOnly(self, port, start_address, data_length, param, param_length): ...
+
+
+class GroupSyncRead(Protocol):
+ def __init__(self, port, ph, start_address, data_length):
+ self.port: str
+ self.ph: PortHandler
+ self.start_address: int
+ self.data_length: int
+ self.last_result: bool
+ self.is_param_changed: bool
+ self.param: list
+ self.data_dict: dict
+
+ def makeParam(self): ...
+ def addParam(self, id): ...
+ def removeParam(self, id): ...
+ def clearParam(self): ...
+ def txPacket(self): ...
+ def rxPacket(self): ...
+ def txRxPacket(self): ...
+ def isAvailable(self, id, address, data_length): ...
+ def getData(self, id, address, data_length): ...
+
+
+class GroupSyncWrite(Protocol):
+ def __init__(self, port, ph, start_address, data_length):
+ self.port: str
+ self.ph: PortHandler
+ self.start_address: int
+ self.data_length: int
+ self.is_param_changed: bool
+ self.param: list
+ self.data_dict: dict
+
+ def makeParam(self): ...
+ def addParam(self, id, data): ...
+ def removeParam(self, id): ...
+ def changeParam(self, id, data): ...
+ def clearParam(self): ...
+ def txPacket(self): ...
+
+
+class MotorsBus(abc.ABC):
+ """
+    A MotorsBus allows efficient reading from and writing to the motors attached to it.
+ It represents several motors daisy-chained together and connected through a serial port.
+ There are currently two implementations of this abstract class:
+ - DynamixelMotorsBus
+ - FeetechMotorsBus
+
+ Note: This class may evolve in the future should we add support for other types of bus.
+
+    A MotorsBus subclass instance requires a port (e.g. `FeetechMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
+ To find the port, you can run our utility script:
+ ```bash
+    python -m lerobot.find_port
+ >>> Finding all available ports for the MotorsBus.
+ >>> ["/dev/tty.usbmodem575E0032081", "/dev/tty.usbmodem575E0031751"]
+ >>> Remove the usb cable from your MotorsBus and press Enter when done.
+ >>> The port of this MotorsBus is /dev/tty.usbmodem575E0031751.
+ >>> Reconnect the usb cable.
+ ```
+
+ Example of usage for 1 Feetech sts3215 motor connected to the bus:
+ ```python
+ bus = FeetechMotorsBus(
+ port="/dev/tty.usbmodem575E0031751",
+ motors={"my_motor": (1, "sts3215")},
+ )
+ bus.connect()
+
+ position = bus.read("Present_Position", "my_motor", normalize=False)
+
+    # Move by a few motor steps as an example
+ few_steps = 30
+ bus.write("Goal_Position", "my_motor", position + few_steps, normalize=False)
+
+ # When done, properly disconnect the port using
+ bus.disconnect()
+ ```
+ """
+
+ apply_drive_mode: bool
+ available_baudrates: list[int]
+ default_baudrate: int
+ default_timeout: int
+ model_baudrate_table: dict[str, dict]
+ model_ctrl_table: dict[str, dict]
+ model_encoding_table: dict[str, dict]
+ model_number_table: dict[str, int]
+ model_resolution_table: dict[str, int]
+ normalized_data: list[str]
+
+ def __init__(
+ self,
+ port: str,
+ motors: dict[str, Motor],
+ calibration: dict[str, MotorCalibration] | None = None,
+ ):
+ self.port = port
+ self.motors = motors
+ self.calibration = calibration if calibration else {}
+
+ self.port_handler: PortHandler
+ self.packet_handler: PacketHandler
+ self.sync_reader: GroupSyncRead
+ self.sync_writer: GroupSyncWrite
+ self._comm_success: int
+ self._no_error: int
+
+ self._id_to_model_dict = {m.id: m.model for m in self.motors.values()}
+ self._id_to_name_dict = {m.id: motor for motor, m in self.motors.items()}
+ self._model_nb_to_model_dict = {v: k for k, v in self.model_number_table.items()}
+
+ self._validate_motors()
+
+ def __len__(self):
+ return len(self.motors)
+
+ def __repr__(self):
+ return (
+ f"{self.__class__.__name__}(\n"
+ f" Port: '{self.port}',\n"
+ f" Motors: \n{pformat(self.motors, indent=8, sort_dicts=False)},\n"
+ ")',\n"
+ )
+
+ @cached_property
+ def _has_different_ctrl_tables(self) -> bool:
+ if len(self.models) < 2:
+ return False
+
+ first_table = self.model_ctrl_table[self.models[0]]
+ return any(
+ DeepDiff(first_table, get_ctrl_table(self.model_ctrl_table, model)) for model in self.models[1:]
+ )
+
+ @cached_property
+ def models(self) -> list[str]:
+ return [m.model for m in self.motors.values()]
+
+ @cached_property
+ def ids(self) -> list[int]:
+ return [m.id for m in self.motors.values()]
+
+ def _model_nb_to_model(self, motor_nb: int) -> str:
+ return self._model_nb_to_model_dict[motor_nb]
+
+ def _id_to_model(self, motor_id: int) -> str:
+ return self._id_to_model_dict[motor_id]
+
+ def _id_to_name(self, motor_id: int) -> str:
+ return self._id_to_name_dict[motor_id]
+
+ def _get_motor_id(self, motor: NameOrID) -> int:
+ if isinstance(motor, str):
+ return self.motors[motor].id
+ elif isinstance(motor, int):
+ return motor
+ else:
+ raise TypeError(f"'{motor}' should be int, str.")
+
+    def _get_motor_model(self, motor: NameOrID) -> str:
+ if isinstance(motor, str):
+ return self.motors[motor].model
+ elif isinstance(motor, int):
+ return self._id_to_model_dict[motor]
+ else:
+ raise TypeError(f"'{motor}' should be int, str.")
+
+ def _get_motors_list(self, motors: str | list[str] | None) -> list[str]:
+ if motors is None:
+ return list(self.motors)
+ elif isinstance(motors, str):
+ return [motors]
+ elif isinstance(motors, list):
+ return motors.copy()
+ else:
+ raise TypeError(motors)
+
+    def _get_ids_values_dict(self, values: Value | dict[str, Value] | None) -> dict[int, Value]:
+ if isinstance(values, (int, float)):
+ return dict.fromkeys(self.ids, values)
+ elif isinstance(values, dict):
+ return {self.motors[motor].id: val for motor, val in values.items()}
+ else:
+ raise TypeError(f"'values' is expected to be a single value or a dict. Got {values}")
+
+ def _validate_motors(self) -> None:
+ if len(self.ids) != len(set(self.ids)):
+ raise ValueError(f"Some motors have the same id!\n{self}")
+
+ # Ensure ctrl table available for all models
+ for model in self.models:
+ get_ctrl_table(self.model_ctrl_table, model)
+
+ def _is_comm_success(self, comm: int) -> bool:
+ return comm == self._comm_success
+
+ def _is_error(self, error: int) -> bool:
+ return error != self._no_error
+
+ def _assert_motors_exist(self) -> None:
+ expected_models = {m.id: self.model_number_table[m.model] for m in self.motors.values()}
+
+ found_models = {}
+ for id_ in self.ids:
+ model_nb = self.ping(id_)
+ if model_nb is not None:
+ found_models[id_] = model_nb
+
+ missing_ids = [id_ for id_ in self.ids if id_ not in found_models]
+ wrong_models = {
+ id_: (expected_models[id_], found_models[id_])
+ for id_ in found_models
+ if expected_models.get(id_) != found_models[id_]
+ }
+
+ if missing_ids or wrong_models:
+ error_lines = [f"{self.__class__.__name__} motor check failed on port '{self.port}':"]
+
+ if missing_ids:
+ error_lines.append("\nMissing motor IDs:")
+ error_lines.extend(
+ f" - {id_} (expected model: {expected_models[id_]})" for id_ in missing_ids
+ )
+
+ if wrong_models:
+ error_lines.append("\nMotors with incorrect model numbers:")
+ error_lines.extend(
+ f" - {id_} ({self._id_to_name(id_)}): expected {expected}, found {found}"
+ for id_, (expected, found) in wrong_models.items()
+ )
+
+ error_lines.append("\nFull expected motor list (id: model_number):")
+ error_lines.append(pformat(expected_models, indent=4, sort_dicts=False))
+ error_lines.append("\nFull found motor list (id: model_number):")
+ error_lines.append(pformat(found_models, indent=4, sort_dicts=False))
+
+ raise RuntimeError("\n".join(error_lines))
+
+ @abc.abstractmethod
+ def _assert_protocol_is_compatible(self, instruction_name: str) -> None:
+ pass
+
+ @property
+ def is_connected(self) -> bool:
+ """bool: `True` if the underlying serial port is open."""
+ return self.port_handler.is_open
+
+ def connect(self, handshake: bool = True) -> None:
+ """Open the serial port and initialise communication.
+
+ Args:
+ handshake (bool, optional): Pings every expected motor and performs additional
+ integrity checks specific to the implementation. Defaults to `True`.
+
+ Raises:
+ DeviceAlreadyConnectedError: The port is already open.
+ ConnectionError: The underlying SDK failed to open the port or the handshake did not succeed.
+ """
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(
+ f"{self.__class__.__name__}('{self.port}') is already connected. Do not call `{self.__class__.__name__}.connect()` twice."
+ )
+
+ self._connect(handshake)
+ self.set_timeout()
+ logger.debug(f"{self.__class__.__name__} connected.")
+
+ def _connect(self, handshake: bool = True) -> None:
+ try:
+ if not self.port_handler.openPort():
+ raise OSError(f"Failed to open port '{self.port}'.")
+ elif handshake:
+ self._handshake()
+ except (FileNotFoundError, OSError, serial.SerialException) as e:
+ raise ConnectionError(
+ f"\nCould not connect on port '{self.port}'. Make sure you are using the correct port."
+ "\nTry running `python -m lerobot.find_port`\n"
+ ) from e
+
+ @abc.abstractmethod
+ def _handshake(self) -> None:
+ pass
+
+ def disconnect(self, disable_torque: bool = True) -> None:
+ """Close the serial port (optionally disabling torque first).
+
+ Args:
+ disable_torque (bool, optional): If `True` (default) torque is disabled on every motor before
+                closing the port. This can prevent damage to the motors if they would otherwise be left
+                applying a resisting torque after disconnecting.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(
+ f"{self.__class__.__name__}('{self.port}') is not connected. Try running `{self.__class__.__name__}.connect()` first."
+ )
+
+ if disable_torque:
+ self.port_handler.clearPort()
+ self.port_handler.is_using = False
+ self.disable_torque(num_retry=5)
+
+ self.port_handler.closePort()
+ logger.debug(f"{self.__class__.__name__} disconnected.")
+
+ @classmethod
+ def scan_port(cls, port: str, *args, **kwargs) -> dict[int, list[int]]:
+ """Probe *port* at every supported baud-rate and list responding IDs.
+
+ Args:
+ port (str): Serial/USB port to scan (e.g. ``"/dev/ttyUSB0"``).
+ *args, **kwargs: Forwarded to the subclass constructor.
+
+ Returns:
+ dict[int, list[int]]: Mapping *baud-rate → list of motor IDs*
+ for every baud-rate that produced at least one response.
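+
+        Example (a sketch with a hypothetical port path and IDs, only to illustrate the return shape):
+            >>> FeetechMotorsBus.scan_port("/dev/tty.usbmodem575E0031751")
+            {1000000: [1, 2, 3]}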
+ """
+ bus = cls(port, {}, *args, **kwargs)
+ bus._connect(handshake=False)
+ baudrate_ids = {}
+ for baudrate in tqdm(bus.available_baudrates, desc="Scanning port"):
+ bus.set_baudrate(baudrate)
+ ids_models = bus.broadcast_ping()
+ if ids_models:
+ tqdm.write(f"Motors found for {baudrate=}: {pformat(ids_models, indent=4)}")
+ baudrate_ids[baudrate] = list(ids_models)
+
+ bus.port_handler.closePort()
+ return baudrate_ids
+
+ def setup_motor(
+ self, motor: str, initial_baudrate: int | None = None, initial_id: int | None = None
+ ) -> None:
+ """Assign the correct ID and baud-rate to a single motor.
+
+ This helper temporarily switches to the motor's current settings, disables torque, sets the desired
+ ID, and finally programs the bus' default baud-rate.
+
+ Args:
+ motor (str): Key of the motor in :pyattr:`motors`.
+ initial_baudrate (int | None, optional): Current baud-rate (skips scanning when provided).
+ Defaults to None.
+ initial_id (int | None, optional): Current ID (skips scanning when provided). Defaults to None.
+
+ Raises:
+ RuntimeError: The motor could not be found or its model number
+ does not match the expected one.
+ ConnectionError: Communication with the motor failed.
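+
+        Example (a sketch; assumes the bus was created with a motor named "gripper" and that this is the
+        only motor attached to the chain):
+            >>> bus.setup_motor("gripper")  # finds the motor, then writes its target ID and the default baud-rate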
+ """
+ if not self.is_connected:
+ self._connect(handshake=False)
+
+ if initial_baudrate is None:
+ initial_baudrate, initial_id = self._find_single_motor(motor)
+
+ if initial_id is None:
+ _, initial_id = self._find_single_motor(motor, initial_baudrate)
+
+ model = self.motors[motor].model
+ target_id = self.motors[motor].id
+ self.set_baudrate(initial_baudrate)
+ self._disable_torque(initial_id, model)
+
+ # Set ID
+ addr, length = get_address(self.model_ctrl_table, model, "ID")
+ self._write(addr, length, initial_id, target_id)
+
+ # Set Baudrate
+ addr, length = get_address(self.model_ctrl_table, model, "Baud_Rate")
+ baudrate_value = self.model_baudrate_table[model][self.default_baudrate]
+ self._write(addr, length, target_id, baudrate_value)
+
+ self.set_baudrate(self.default_baudrate)
+
+ @abc.abstractmethod
+ def _find_single_motor(self, motor: str, initial_baudrate: int | None) -> tuple[int, int]:
+ pass
+
+ @abc.abstractmethod
+ def configure_motors(self) -> None:
+ """Write implementation-specific recommended settings to every motor.
+
+ Typical changes include shortening the return delay, increasing
+ acceleration limits or disabling safety locks.
+ """
+ pass
+
+ @abc.abstractmethod
+ def disable_torque(self, motors: int | str | list[str] | None = None, num_retry: int = 0) -> None:
+ """Disable torque on selected motors.
+
+        Disabling torque allows writing to the motors' permanent memory area (EPROM/EEPROM).
+
+ Args:
+ motors (int | str | list[str] | None, optional): Target motors. Accepts a motor name, an ID, a
+ list of names or `None` to affect every registered motor. Defaults to `None`.
+ num_retry (int, optional): Number of additional retry attempts on communication failure.
+ Defaults to 0.
+ """
+ pass
+
+ @abc.abstractmethod
+ def _disable_torque(self, motor: int, model: str, num_retry: int = 0) -> None:
+ pass
+
+ @abc.abstractmethod
+ def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
+ """Enable torque on selected motors.
+
+ Args:
+            motors (str | list[str] | None, optional): Same semantics as :pymeth:`disable_torque`. Defaults to `None`.
+ num_retry (int, optional): Number of additional retry attempts on communication failure.
+ Defaults to 0.
+ """
+ pass
+
+ @contextmanager
+ def torque_disabled(self, motors: int | str | list[str] | None = None):
+ """Context-manager that guarantees torque is re-enabled.
+
+ This helper is useful to temporarily disable torque when configuring motors.
+
+ Examples:
+ >>> with bus.torque_disabled():
+ ... # Safe operations here
+ ... pass
+ """
+ self.disable_torque(motors)
+ try:
+ yield
+ finally:
+ self.enable_torque(motors)
+
+ def set_timeout(self, timeout_ms: int | None = None):
+ """Change the packet timeout used by the SDK.
+
+ Args:
+ timeout_ms (int | None, optional): Timeout in *milliseconds*. If `None` (default) the method falls
+ back to :pyattr:`default_timeout`.
+ """
+ timeout_ms = timeout_ms if timeout_ms is not None else self.default_timeout
+ self.port_handler.setPacketTimeoutMillis(timeout_ms)
+
+ def get_baudrate(self) -> int:
+ """Return the current baud-rate configured on the port.
+
+ Returns:
+ int: Baud-rate in bits / second.
+ """
+ return self.port_handler.getBaudRate()
+
+ def set_baudrate(self, baudrate: int) -> None:
+ """Set a new UART baud-rate on the port.
+
+ Args:
+ baudrate (int): Desired baud-rate in bits / second.
+
+ Raises:
+ RuntimeError: The SDK failed to apply the change.
+ """
+ present_bus_baudrate = self.port_handler.getBaudRate()
+ if present_bus_baudrate != baudrate:
+ logger.info(f"Setting bus baud rate to {baudrate}. Previously {present_bus_baudrate}.")
+ self.port_handler.setBaudRate(baudrate)
+
+ if self.port_handler.getBaudRate() != baudrate:
+ raise RuntimeError("Failed to write bus baud rate.")
+
+ @property
+ @abc.abstractmethod
+ def is_calibrated(self) -> bool:
+ """bool: ``True`` if the cached calibration matches the motors."""
+ pass
+
+ @abc.abstractmethod
+ def read_calibration(self) -> dict[str, MotorCalibration]:
+ """Read calibration parameters from the motors.
+
+ Returns:
+ dict[str, MotorCalibration]: Mapping *motor name → calibration*.
+ """
+ pass
+
+ @abc.abstractmethod
+ def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None:
+ """Write calibration parameters to the motors and optionally cache them.
+
+ Args:
+ calibration_dict (dict[str, MotorCalibration]): Calibration obtained from
+ :pymeth:`read_calibration` or crafted by the user.
+ cache (bool, optional): Save the calibration to :pyattr:`calibration`. Defaults to True.
+ """
+ pass
+
+ def reset_calibration(self, motors: NameOrID | list[NameOrID] | None = None) -> None:
+ """Restore factory calibration for the selected motors.
+
+ Homing offset is set to ``0`` and min/max position limits are set to the full usable range.
+ The in-memory :pyattr:`calibration` is cleared.
+
+ Args:
+ motors (NameOrID | list[NameOrID] | None, optional): Selection of motors. `None` (default)
+ resets every motor.
+ """
+ if motors is None:
+ motors = list(self.motors)
+ elif isinstance(motors, (str, int)):
+ motors = [motors]
+ elif not isinstance(motors, list):
+ raise TypeError(motors)
+
+ for motor in motors:
+ model = self._get_motor_model(motor)
+ max_res = self.model_resolution_table[model] - 1
+ self.write("Homing_Offset", motor, 0, normalize=False)
+ self.write("Min_Position_Limit", motor, 0, normalize=False)
+ self.write("Max_Position_Limit", motor, max_res, normalize=False)
+
+ self.calibration = {}
+
+ def set_half_turn_homings(self, motors: NameOrID | list[NameOrID] | None = None) -> dict[NameOrID, Value]:
+ """Centre each motor range around its current position.
+
+ The function computes and writes a homing offset such that the present position becomes exactly one
+ half-turn (e.g. `2047` on a 12-bit encoder).
+
+ Args:
+ motors (NameOrID | list[NameOrID] | None, optional): Motors to adjust. Defaults to all motors (`None`).
+
+ Returns:
+ dict[NameOrID, Value]: Mapping *motor → written homing offset*.
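+
+        Example (a sketch with a hypothetical motor name; the returned offset depends on the position
+        actually read from the motor):
+            >>> bus.set_half_turn_homings("shoulder")
+            {'shoulder': -1047}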
+ """
+ if motors is None:
+ motors = list(self.motors)
+ elif isinstance(motors, (str, int)):
+ motors = [motors]
+ elif not isinstance(motors, list):
+ raise TypeError(motors)
+
+ self.reset_calibration(motors)
+ actual_positions = self.sync_read("Present_Position", motors, normalize=False)
+ homing_offsets = self._get_half_turn_homings(actual_positions)
+ for motor, offset in homing_offsets.items():
+ self.write("Homing_Offset", motor, offset)
+
+ return homing_offsets
+
+ @abc.abstractmethod
+ def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]:
+ pass
+
+ def record_ranges_of_motion(
+ self, motors: NameOrID | list[NameOrID] | None = None, display_values: bool = True
+ ) -> tuple[dict[NameOrID, Value], dict[NameOrID, Value]]:
+ """Interactively record the min/max encoder values of each motor.
+
+ Move the joints by hand (with torque disabled) while the method streams live positions. Press
+ :kbd:`Enter` to finish.
+
+ Args:
+ motors (NameOrID | list[NameOrID] | None, optional): Motors to record.
+ Defaults to every motor (`None`).
+ display_values (bool, optional): When `True` (default) a live table is printed to the console.
+
+ Returns:
+ tuple[dict[NameOrID, Value], dict[NameOrID, Value]]: Two dictionaries *mins* and *maxes* with the
+ extreme values observed for each motor.
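+
+        Example (a sketch with hypothetical motors and illustrative values):
+            >>> mins, maxes = bus.record_ranges_of_motion(["wrist_roll", "gripper"])
+            >>> mins
+            {'wrist_roll': 1412, 'gripper': 2010}
+            >>> maxes
+            {'wrist_roll': 2789, 'gripper': 3650}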
+ """
+ if motors is None:
+ motors = list(self.motors)
+ elif isinstance(motors, (str, int)):
+ motors = [motors]
+ elif not isinstance(motors, list):
+ raise TypeError(motors)
+
+ start_positions = self.sync_read("Present_Position", motors, normalize=False)
+ mins = start_positions.copy()
+ maxes = start_positions.copy()
+
+ user_pressed_enter = False
+ while not user_pressed_enter:
+ positions = self.sync_read("Present_Position", motors, normalize=False)
+ mins = {motor: min(positions[motor], min_) for motor, min_ in mins.items()}
+ maxes = {motor: max(positions[motor], max_) for motor, max_ in maxes.items()}
+
+ if display_values:
+ print("\n-------------------------------------------")
+ print(f"{'NAME':<15} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}")
+ for motor in motors:
+ print(f"{motor:<15} | {mins[motor]:>6} | {positions[motor]:>6} | {maxes[motor]:>6}")
+
+ if enter_pressed():
+ user_pressed_enter = True
+
+ if display_values and not user_pressed_enter:
+ # Move cursor up to overwrite the previous output
+ move_cursor_up(len(motors) + 3)
+
+ same_min_max = [motor for motor in motors if mins[motor] == maxes[motor]]
+ if same_min_max:
+ raise ValueError(f"Some motors have the same min and max values:\n{pformat(same_min_max)}")
+
+ return mins, maxes
+
+ def _normalize(self, ids_values: dict[int, int]) -> dict[int, float]:
+ if not self.calibration:
+ raise RuntimeError(f"{self} has no calibration registered.")
+
+ normalized_values = {}
+ for id_, val in ids_values.items():
+ motor = self._id_to_name(id_)
+ min_ = self.calibration[motor].range_min
+ max_ = self.calibration[motor].range_max
+ drive_mode = self.apply_drive_mode and self.calibration[motor].drive_mode
+ if max_ == min_:
+ raise ValueError(f"Invalid calibration for motor '{motor}': min and max are equal.")
+
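+            # Map the raw encoder value into the user-facing range selected by `norm_mode`:
+            #   RANGE_M100_100 -> [-100, 100], RANGE_0_100 -> [0, 100],
+            #   DEGREES        -> degrees of rotation around the mid-point of the calibrated range.
+            # `drive_mode` mirrors the result for the two bounded ranges (DEGREES ignores it).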
+ bounded_val = min(max_, max(min_, val))
+ if self.motors[motor].norm_mode is MotorNormMode.RANGE_M100_100:
+ norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100
+ normalized_values[id_] = -norm if drive_mode else norm
+ elif self.motors[motor].norm_mode is MotorNormMode.RANGE_0_100:
+ norm = ((bounded_val - min_) / (max_ - min_)) * 100
+ normalized_values[id_] = 100 - norm if drive_mode else norm
+ elif self.motors[motor].norm_mode is MotorNormMode.DEGREES:
+ mid = (min_ + max_) / 2
+ max_res = self.model_resolution_table[self._id_to_model(id_)] - 1
+ normalized_values[id_] = (val - mid) * 360 / max_res
+ else:
+ raise NotImplementedError
+
+ return normalized_values
+
+ def _unnormalize(self, ids_values: dict[int, float]) -> dict[int, int]:
+ if not self.calibration:
+ raise RuntimeError(f"{self} has no calibration registered.")
+
+ unnormalized_values = {}
+ for id_, val in ids_values.items():
+ motor = self._id_to_name(id_)
+ min_ = self.calibration[motor].range_min
+ max_ = self.calibration[motor].range_max
+ drive_mode = self.apply_drive_mode and self.calibration[motor].drive_mode
+ if max_ == min_:
+ raise ValueError(f"Invalid calibration for motor '{motor}': min and max are equal.")
+
+ if self.motors[motor].norm_mode is MotorNormMode.RANGE_M100_100:
+ val = -val if drive_mode else val
+ bounded_val = min(100.0, max(-100.0, val))
+ unnormalized_values[id_] = int(((bounded_val + 100) / 200) * (max_ - min_) + min_)
+ elif self.motors[motor].norm_mode is MotorNormMode.RANGE_0_100:
+ val = 100 - val if drive_mode else val
+ bounded_val = min(100.0, max(0.0, val))
+ unnormalized_values[id_] = int((bounded_val / 100) * (max_ - min_) + min_)
+ elif self.motors[motor].norm_mode is MotorNormMode.DEGREES:
+ mid = (min_ + max_) / 2
+ max_res = self.model_resolution_table[self._id_to_model(id_)] - 1
+ unnormalized_values[id_] = int((val * max_res / 360) + mid)
+ else:
+ raise NotImplementedError
+
+ return unnormalized_values
+
+ @abc.abstractmethod
+ def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
+ pass
+
+ @abc.abstractmethod
+ def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
+ pass
+
+ def _serialize_data(self, value: int, length: int) -> list[int]:
+ """
+ Converts an unsigned integer value into a list of byte-sized integers to be sent via a communication
+ protocol. Depending on the protocol, split values can be in big-endian or little-endian order.
+
+ Supported data length for both Feetech and Dynamixel:
+ - 1 (for values 0 to 255)
+ - 2 (for values 0 to 65,535)
+ - 4 (for values 0 to 4,294,967,295)
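+
+        For example, in little-endian order a value of 0x1234 with length 2 would be split into
+        [0x34, 0x12] (the exact order is delegated to `_split_into_byte_chunks`).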
+ """
+ if value < 0:
+ raise ValueError(f"Negative values are not allowed: {value}")
+
+ max_value = {1: 0xFF, 2: 0xFFFF, 4: 0xFFFFFFFF}.get(length)
+ if max_value is None:
+ raise NotImplementedError(f"Unsupported byte size: {length}. Expected [1, 2, 4].")
+
+ if value > max_value:
+ raise ValueError(f"Value {value} exceeds the maximum for {length} bytes ({max_value}).")
+
+ return self._split_into_byte_chunks(value, length)
+
+ @abc.abstractmethod
+ def _split_into_byte_chunks(self, value: int, length: int) -> list[int]:
+ """Convert an integer into a list of byte-sized integers."""
+ pass
+
+ def ping(self, motor: NameOrID, num_retry: int = 0, raise_on_error: bool = False) -> int | None:
+ """Ping a single motor and return its model number.
+
+ Args:
+ motor (NameOrID): Target motor (name or ID).
+ num_retry (int, optional): Extra attempts before giving up. Defaults to `0`.
+ raise_on_error (bool, optional): If `True` communication errors raise exceptions instead of
+ returning `None`. Defaults to `False`.
+
+ Returns:
+ int | None: Motor model number or `None` on failure.
+ """
+ id_ = self._get_motor_id(motor)
+ for n_try in range(1 + num_retry):
+ model_number, comm, error = self.packet_handler.ping(self.port_handler, id_)
+ if self._is_comm_success(comm):
+ break
+ logger.debug(f"ping failed for {id_=}: {n_try=} got {comm=} {error=}")
+
+ if not self._is_comm_success(comm):
+ if raise_on_error:
+ raise ConnectionError(self.packet_handler.getTxRxResult(comm))
+ else:
+ return
+ if self._is_error(error):
+ if raise_on_error:
+ raise RuntimeError(self.packet_handler.getRxPacketError(error))
+ else:
+ return
+
+ return model_number
+
+ @abc.abstractmethod
+ def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None:
+ """Ping every ID on the bus using the broadcast address.
+
+ Args:
+ num_retry (int, optional): Retry attempts. Defaults to `0`.
+ raise_on_error (bool, optional): When `True` failures raise an exception instead of returning
+ `None`. Defaults to `False`.
+
+ Returns:
+ dict[int, int] | None: Mapping *id → model number* or `None` if the call failed.
+ """
+ pass
+
+ def read(
+ self,
+ data_name: str,
+ motor: str,
+ *,
+ normalize: bool = True,
+ num_retry: int = 0,
+ ) -> Value:
+ """Read a register from a motor.
+
+ Args:
+ data_name (str): Control-table key (e.g. `"Present_Position"`).
+ motor (str): Motor name.
+ normalize (bool, optional): When `True` (default) scale the value to a user-friendly range as
+ defined by the calibration.
+ num_retry (int, optional): Retry attempts. Defaults to `0`.
+
+ Returns:
+ Value: Raw or normalised value depending on *normalize*.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(
+ f"{self.__class__.__name__}('{self.port}') is not connected. You need to run `{self.__class__.__name__}.connect()`."
+ )
+
+ id_ = self.motors[motor].id
+ model = self.motors[motor].model
+ addr, length = get_address(self.model_ctrl_table, model, data_name)
+
+ err_msg = f"Failed to read '{data_name}' on {id_=} after {num_retry + 1} tries."
+ value, _, _ = self._read(addr, length, id_, num_retry=num_retry, raise_on_error=True, err_msg=err_msg)
+
+ id_value = self._decode_sign(data_name, {id_: value})
+
+ if normalize and data_name in self.normalized_data:
+ id_value = self._normalize(id_value)
+
+ return id_value[id_]
+
+ def _read(
+ self,
+ address: int,
+ length: int,
+ motor_id: int,
+ *,
+ num_retry: int = 0,
+ raise_on_error: bool = True,
+ err_msg: str = "",
+    ) -> tuple[int, int, int]:
+ if length == 1:
+ read_fn = self.packet_handler.read1ByteTxRx
+ elif length == 2:
+ read_fn = self.packet_handler.read2ByteTxRx
+ elif length == 4:
+ read_fn = self.packet_handler.read4ByteTxRx
+ else:
+ raise ValueError(length)
+
+ for n_try in range(1 + num_retry):
+ value, comm, error = read_fn(self.port_handler, motor_id, address)
+ if self._is_comm_success(comm):
+ break
+ logger.debug(
+ f"Failed to read @{address=} ({length=}) on {motor_id=} ({n_try=}): "
+ + self.packet_handler.getTxRxResult(comm)
+ )
+
+ if not self._is_comm_success(comm) and raise_on_error:
+ raise ConnectionError(f"{err_msg} {self.packet_handler.getTxRxResult(comm)}")
+ elif self._is_error(error) and raise_on_error:
+ raise RuntimeError(f"{err_msg} {self.packet_handler.getRxPacketError(error)}")
+
+ return value, comm, error
+
+ def write(
+ self, data_name: str, motor: str, value: Value, *, normalize: bool = True, num_retry: int = 0
+ ) -> None:
+ """Write a value to a single motor's register.
+
+ Contrary to :pymeth:`sync_write`, this expects a response status packet emitted by the motor, which
+        guarantees that the value was written to the register successfully. As a consequence, it is
+        slower than :pymeth:`sync_write` but more reliable. It should typically be used when configuring
+ motors.
+
+ Args:
+ data_name (str): Register name.
+ motor (str): Motor name.
+ value (Value): Value to write. If *normalize* is `True` the value is first converted to raw
+ units.
+ normalize (bool, optional): Enable or disable normalisation. Defaults to `True`.
+ num_retry (int, optional): Retry attempts. Defaults to `0`.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(
+ f"{self.__class__.__name__}('{self.port}') is not connected. You need to run `{self.__class__.__name__}.connect()`."
+ )
+
+ id_ = self.motors[motor].id
+ model = self.motors[motor].model
+ addr, length = get_address(self.model_ctrl_table, model, data_name)
+
+ if normalize and data_name in self.normalized_data:
+ value = self._unnormalize({id_: value})[id_]
+
+ value = self._encode_sign(data_name, {id_: value})[id_]
+
+ err_msg = f"Failed to write '{data_name}' on {id_=} with '{value}' after {num_retry + 1} tries."
+ self._write(addr, length, id_, value, num_retry=num_retry, raise_on_error=True, err_msg=err_msg)
+
+ def _write(
+ self,
+ addr: int,
+ length: int,
+ motor_id: int,
+ value: int,
+ *,
+ num_retry: int = 0,
+ raise_on_error: bool = True,
+ err_msg: str = "",
+ ) -> tuple[int, int]:
+ data = self._serialize_data(value, length)
+ for n_try in range(1 + num_retry):
+ comm, error = self.packet_handler.writeTxRx(self.port_handler, motor_id, addr, length, data)
+ if self._is_comm_success(comm):
+ break
+ logger.debug(
+ f"Failed to sync write @{addr=} ({length=}) on id={motor_id} with {value=} ({n_try=}): "
+ + self.packet_handler.getTxRxResult(comm)
+ )
+
+ if not self._is_comm_success(comm) and raise_on_error:
+ raise ConnectionError(f"{err_msg} {self.packet_handler.getTxRxResult(comm)}")
+ elif self._is_error(error) and raise_on_error:
+ raise RuntimeError(f"{err_msg} {self.packet_handler.getRxPacketError(error)}")
+
+ return comm, error
+
+ def sync_read(
+ self,
+ data_name: str,
+ motors: str | list[str] | None = None,
+ *,
+ normalize: bool = True,
+ num_retry: int = 0,
+ ) -> dict[str, Value]:
+ """Read the same register from several motors at once.
+
+ Args:
+ data_name (str): Register name.
+ motors (str | list[str] | None, optional): Motors to query. `None` (default) reads every motor.
+ normalize (bool, optional): Normalisation flag. Defaults to `True`.
+ num_retry (int, optional): Retry attempts. Defaults to `0`.
+
+ Returns:
+ dict[str, Value]: Mapping *motor name → value*.
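+
+        Example (a sketch with hypothetical motor names and illustrative raw values):
+            >>> bus.sync_read("Present_Position", normalize=False)
+            {'shoulder': 2047, 'elbow': 1024}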
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(
+ f"{self.__class__.__name__}('{self.port}') is not connected. You need to run `{self.__class__.__name__}.connect()`."
+ )
+
+ self._assert_protocol_is_compatible("sync_read")
+
+ names = self._get_motors_list(motors)
+ ids = [self.motors[motor].id for motor in names]
+ models = [self.motors[motor].model for motor in names]
+
+ if self._has_different_ctrl_tables:
+ assert_same_address(self.model_ctrl_table, models, data_name)
+
+ model = next(iter(models))
+ addr, length = get_address(self.model_ctrl_table, model, data_name)
+
+ err_msg = f"Failed to sync read '{data_name}' on {ids=} after {num_retry + 1} tries."
+ ids_values, _ = self._sync_read(
+ addr, length, ids, num_retry=num_retry, raise_on_error=True, err_msg=err_msg
+ )
+
+ ids_values = self._decode_sign(data_name, ids_values)
+
+ if normalize and data_name in self.normalized_data:
+ ids_values = self._normalize(ids_values)
+
+ return {self._id_to_name(id_): value for id_, value in ids_values.items()}
+
+ def _sync_read(
+ self,
+ addr: int,
+ length: int,
+ motor_ids: list[int],
+ *,
+ num_retry: int = 0,
+ raise_on_error: bool = True,
+ err_msg: str = "",
+ ) -> tuple[dict[int, int], int]:
+ self._setup_sync_reader(motor_ids, addr, length)
+ for n_try in range(1 + num_retry):
+ comm = self.sync_reader.txRxPacket()
+ if self._is_comm_success(comm):
+ break
+ logger.debug(
+ f"Failed to sync read @{addr=} ({length=}) on {motor_ids=} ({n_try=}): "
+ + self.packet_handler.getTxRxResult(comm)
+ )
+
+ if not self._is_comm_success(comm) and raise_on_error:
+ raise ConnectionError(f"{err_msg} {self.packet_handler.getTxRxResult(comm)}")
+
+ values = {id_: self.sync_reader.getData(id_, addr, length) for id_ in motor_ids}
+ return values, comm
+
+ def _setup_sync_reader(self, motor_ids: list[int], addr: int, length: int) -> None:
+ self.sync_reader.clearParam()
+ self.sync_reader.start_address = addr
+ self.sync_reader.data_length = length
+ for id_ in motor_ids:
+ self.sync_reader.addParam(id_)
+
+ # TODO(aliberts, pkooij): Implementing something like this could get even much faster read times if need be.
+ # Would have to handle the logic of checking if a packet has been sent previously though but doable.
+ # This could be at the cost of increase latency between the moment the data is produced by the motors and
+ # the moment it is used by a policy.
+ # def _async_read(self, motor_ids: list[int], address: int, length: int):
+ # if self.sync_reader.start_address != address or self.sync_reader.data_length != length or ...:
+ # self._setup_sync_reader(motor_ids, address, length)
+ # else:
+ # self.sync_reader.rxPacket()
+ # self.sync_reader.txPacket()
+
+ # for id_ in motor_ids:
+ # value = self.sync_reader.getData(id_, address, length)
+
+ def sync_write(
+ self,
+ data_name: str,
+ values: Value | dict[str, Value],
+ *,
+ normalize: bool = True,
+ num_retry: int = 0,
+ ) -> None:
+ """Write the same register on multiple motors.
+
+        Contrary to :pymeth:`write`, this *does not* expect a response status packet from the motors, so
+        lost packets can go unnoticed. It is faster than :pymeth:`write` and should typically be used when
+ frequency matters and losing some packets is acceptable (e.g. teleoperation loops).
+
+ Args:
+ data_name (str): Register name.
+ values (Value | dict[str, Value]): Either a single value (applied to every motor) or a mapping
+ *motor name → value*.
+ normalize (bool, optional): If `True` (default) convert values from the user range to raw units.
+ num_retry (int, optional): Retry attempts. Defaults to `0`.
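+
+        Example (a sketch with hypothetical motor names and illustrative raw values):
+            >>> bus.sync_write("Goal_Position", {"shoulder": 2047, "elbow": 1024}, normalize=False)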
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(
+ f"{self.__class__.__name__}('{self.port}') is not connected. You need to run `{self.__class__.__name__}.connect()`."
+ )
+
+ ids_values = self._get_ids_values_dict(values)
+ models = [self._id_to_model(id_) for id_ in ids_values]
+ if self._has_different_ctrl_tables:
+ assert_same_address(self.model_ctrl_table, models, data_name)
+
+ model = next(iter(models))
+ addr, length = get_address(self.model_ctrl_table, model, data_name)
+
+ if normalize and data_name in self.normalized_data:
+ ids_values = self._unnormalize(ids_values)
+
+ ids_values = self._encode_sign(data_name, ids_values)
+
+ err_msg = f"Failed to sync write '{data_name}' with {ids_values=} after {num_retry + 1} tries."
+ self._sync_write(addr, length, ids_values, num_retry=num_retry, raise_on_error=True, err_msg=err_msg)
+
+ def _sync_write(
+ self,
+ addr: int,
+ length: int,
+ ids_values: dict[int, int],
+ num_retry: int = 0,
+ raise_on_error: bool = True,
+ err_msg: str = "",
+ ) -> int:
+ self._setup_sync_writer(ids_values, addr, length)
+ for n_try in range(1 + num_retry):
+ comm = self.sync_writer.txPacket()
+ if self._is_comm_success(comm):
+ break
+ logger.debug(
+ f"Failed to sync write @{addr=} ({length=}) with {ids_values=} ({n_try=}): "
+ + self.packet_handler.getTxRxResult(comm)
+ )
+
+ if not self._is_comm_success(comm) and raise_on_error:
+ raise ConnectionError(f"{err_msg} {self.packet_handler.getTxRxResult(comm)}")
+
+ return comm
+
+ def _setup_sync_writer(self, ids_values: dict[int, int], addr: int, length: int) -> None:
+ self.sync_writer.clearParam()
+ self.sync_writer.start_address = addr
+ self.sync_writer.data_length = length
+ for id_, value in ids_values.items():
+ data = self._serialize_data(value, length)
+ self.sync_writer.addParam(id_, data)
diff --git a/src/lerobot/optim/__init__.py b/src/lerobot/optim/__init__.py
new file mode 100644
index 0000000000..de2c4c9965
--- /dev/null
+++ b/src/lerobot/optim/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .optimizers import OptimizerConfig as OptimizerConfig
diff --git a/src/lerobot/optim/factory.py b/src/lerobot/optim/factory.py
new file mode 100644
index 0000000000..bab95d0ce3
--- /dev/null
+++ b/src/lerobot/optim/factory.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import LRScheduler
+
+from lerobot.configs.train import TrainPipelineConfig
+from lerobot.policies.pretrained import PreTrainedPolicy
+
+
+def make_optimizer_and_scheduler(
+ cfg: TrainPipelineConfig, policy: PreTrainedPolicy
+) -> tuple[Optimizer, LRScheduler | None]:
+ """Generates the optimizer and scheduler based on configs.
+
+ Args:
+ cfg (TrainPipelineConfig): The training config that contains optimizer and scheduler configs
+        policy (PreTrainedPolicy): The policy from which parameters and optimizer/scheduler presets are taken.
+
+ Returns:
+ tuple[Optimizer, LRScheduler | None]: The couple (Optimizer, Scheduler). Scheduler can be `None`.
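+
+    Example (a sketch; assumes `cfg` has been parsed from a training config and `policy` is already built):
+        >>> optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)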
+ """
+ params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters()
+ optimizer = cfg.optimizer.build(params)
+ lr_scheduler = cfg.scheduler.build(optimizer, cfg.steps) if cfg.scheduler is not None else None
+ return optimizer, lr_scheduler
diff --git a/src/lerobot/optim/optimizers.py b/src/lerobot/optim/optimizers.py
new file mode 100644
index 0000000000..ece4dc157a
--- /dev/null
+++ b/src/lerobot/optim/optimizers.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any
+
+import draccus
+import torch
+from safetensors.torch import load_file, save_file
+
+from lerobot.constants import (
+ OPTIMIZER_PARAM_GROUPS,
+ OPTIMIZER_STATE,
+)
+from lerobot.datasets.utils import flatten_dict, unflatten_dict, write_json
+from lerobot.utils.io_utils import deserialize_json_into_object
+
+
+@dataclass
+class OptimizerConfig(draccus.ChoiceRegistry, abc.ABC):
+ lr: float
+ weight_decay: float
+ grad_clip_norm: float
+
+ @property
+ def type(self) -> str:
+ return self.get_choice_name(self.__class__)
+
+ @classmethod
+ def default_choice_name(cls) -> str | None:
+ return "adam"
+
+ @abc.abstractmethod
+ def build(self) -> torch.optim.Optimizer | dict[str, torch.optim.Optimizer]:
+ """
+ Build the optimizer. It can be a single optimizer or a dictionary of optimizers.
+ NOTE: Multiple optimizers are useful when you have different models to optimize.
+ For example, you can have one optimizer for the policy and another one for the value function
+ in reinforcement learning settings.
+
+ Returns:
+ The optimizer or a dictionary of optimizers.
+ """
+ raise NotImplementedError
+
+
+@OptimizerConfig.register_subclass("adam")
+@dataclass
+class AdamConfig(OptimizerConfig):
+ lr: float = 1e-3
+ betas: tuple[float, float] = (0.9, 0.999)
+ eps: float = 1e-8
+ weight_decay: float = 0.0
+ grad_clip_norm: float = 10.0
+
+ def build(self, params: dict) -> torch.optim.Optimizer:
+ kwargs = asdict(self)
+ kwargs.pop("grad_clip_norm")
+ return torch.optim.Adam(params, **kwargs)
+
+
+@OptimizerConfig.register_subclass("adamw")
+@dataclass
+class AdamWConfig(OptimizerConfig):
+ lr: float = 1e-3
+ betas: tuple[float, float] = (0.9, 0.999)
+ eps: float = 1e-8
+ weight_decay: float = 1e-2
+ grad_clip_norm: float = 10.0
+
+ def build(self, params: dict) -> torch.optim.Optimizer:
+ kwargs = asdict(self)
+ kwargs.pop("grad_clip_norm")
+ return torch.optim.AdamW(params, **kwargs)
+
+
+@OptimizerConfig.register_subclass("sgd")
+@dataclass
+class SGDConfig(OptimizerConfig):
+ lr: float = 1e-3
+ momentum: float = 0.0
+ dampening: float = 0.0
+ nesterov: bool = False
+ weight_decay: float = 0.0
+ grad_clip_norm: float = 10.0
+
+ def build(self, params: dict) -> torch.optim.Optimizer:
+ kwargs = asdict(self)
+ kwargs.pop("grad_clip_norm")
+ return torch.optim.SGD(params, **kwargs)
+
+
+@OptimizerConfig.register_subclass("multi_adam")
+@dataclass
+class MultiAdamConfig(OptimizerConfig):
+ """Configuration for multiple Adam optimizers with different parameter groups.
+
+ This creates a dictionary of Adam optimizers, each with its own hyperparameters.
+
+ Args:
+ lr: Default learning rate (used if not specified for a group)
+ weight_decay: Default weight decay (used if not specified for a group)
+ optimizer_groups: Dictionary mapping parameter group names to their hyperparameters
+ grad_clip_norm: Gradient clipping norm
+ """
+
+ lr: float = 1e-3
+ weight_decay: float = 0.0
+ grad_clip_norm: float = 10.0
+ optimizer_groups: dict[str, dict[str, Any]] = field(default_factory=dict)
+
+ def build(self, params_dict: dict[str, list]) -> dict[str, torch.optim.Optimizer]:
+ """Build multiple Adam optimizers.
+
+ Args:
+ params_dict: Dictionary mapping parameter group names to lists of parameters
+ The keys should match the keys in optimizer_groups
+
+ Returns:
+ Dictionary mapping parameter group names to their optimizers
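+
+        Example (a sketch; "actor"/"critic" and the modules behind them are hypothetical):
+            >>> cfg = MultiAdamConfig(lr=1e-3, optimizer_groups={"actor": {"lr": 3e-4}})
+            >>> optimizers = cfg.build({"actor": actor.parameters(), "critic": critic.parameters()})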
+ """
+ optimizers = {}
+
+ for name, params in params_dict.items():
+ # Get group-specific hyperparameters or use defaults
+ group_config = self.optimizer_groups.get(name, {})
+
+ # Create optimizer with merged parameters (defaults + group-specific)
+ optimizer_kwargs = {
+ "lr": group_config.get("lr", self.lr),
+ "betas": group_config.get("betas", (0.9, 0.999)),
+ "eps": group_config.get("eps", 1e-5),
+ "weight_decay": group_config.get("weight_decay", self.weight_decay),
+ }
+
+ optimizers[name] = torch.optim.Adam(params, **optimizer_kwargs)
+
+ return optimizers
+
+
+def save_optimizer_state(
+ optimizer: torch.optim.Optimizer | dict[str, torch.optim.Optimizer], save_dir: Path
+) -> None:
+ """Save optimizer state to disk.
+
+ Args:
+ optimizer: Either a single optimizer or a dictionary of optimizers.
+ save_dir: Directory to save the optimizer state.
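+
+    Example (a sketch with a hypothetical checkpoint directory):
+        >>> save_optimizer_state(optimizer, Path("outputs/checkpoints/000100/optimizer"))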
+ """
+ if isinstance(optimizer, dict):
+ # Handle dictionary of optimizers
+ for name, opt in optimizer.items():
+ optimizer_dir = save_dir / name
+ optimizer_dir.mkdir(exist_ok=True, parents=True)
+ _save_single_optimizer_state(opt, optimizer_dir)
+ else:
+ # Handle single optimizer
+ _save_single_optimizer_state(optimizer, save_dir)
+
+
+def _save_single_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> None:
+ """Save a single optimizer's state to disk."""
+ state = optimizer.state_dict()
+ param_groups = state.pop("param_groups")
+ flat_state = flatten_dict(state)
+ save_file(flat_state, save_dir / OPTIMIZER_STATE)
+ write_json(param_groups, save_dir / OPTIMIZER_PARAM_GROUPS)
+
+
+def load_optimizer_state(
+ optimizer: torch.optim.Optimizer | dict[str, torch.optim.Optimizer], save_dir: Path
+) -> torch.optim.Optimizer | dict[str, torch.optim.Optimizer]:
+ """Load optimizer state from disk.
+
+ Args:
+ optimizer: Either a single optimizer or a dictionary of optimizers.
+ save_dir: Directory to load the optimizer state from.
+
+ Returns:
+ The updated optimizer(s) with loaded state.
+ """
+ if isinstance(optimizer, dict):
+ # Handle dictionary of optimizers
+ loaded_optimizers = {}
+ for name, opt in optimizer.items():
+ optimizer_dir = save_dir / name
+ if optimizer_dir.exists():
+ loaded_optimizers[name] = _load_single_optimizer_state(opt, optimizer_dir)
+ else:
+ loaded_optimizers[name] = opt
+ return loaded_optimizers
+ else:
+ # Handle single optimizer
+ return _load_single_optimizer_state(optimizer, save_dir)
+
+
+def _load_single_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> torch.optim.Optimizer:
+ """Load a single optimizer's state from disk."""
+ current_state_dict = optimizer.state_dict()
+ flat_state = load_file(save_dir / OPTIMIZER_STATE)
+ state = unflatten_dict(flat_state)
+
+ # Handle case where 'state' key might not exist (for newly created optimizers)
+ if "state" in state:
+ loaded_state_dict = {"state": {int(k): v for k, v in state["state"].items()}}
+ else:
+ loaded_state_dict = {"state": {}}
+
+ if "param_groups" in current_state_dict:
+ param_groups = deserialize_json_into_object(
+ save_dir / OPTIMIZER_PARAM_GROUPS, current_state_dict["param_groups"]
+ )
+ loaded_state_dict["param_groups"] = param_groups
+
+ optimizer.load_state_dict(loaded_state_dict)
+ return optimizer
diff --git a/src/lerobot/optim/schedulers.py b/src/lerobot/optim/schedulers.py
new file mode 100644
index 0000000000..d080181755
--- /dev/null
+++ b/src/lerobot/optim/schedulers.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+import math
+from dataclasses import asdict, dataclass
+from pathlib import Path
+
+import draccus
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import LambdaLR, LRScheduler
+
+from lerobot.constants import SCHEDULER_STATE
+from lerobot.datasets.utils import write_json
+from lerobot.utils.io_utils import deserialize_json_into_object
+
+
+@dataclass
+class LRSchedulerConfig(draccus.ChoiceRegistry, abc.ABC):
+ num_warmup_steps: int
+
+ @property
+ def type(self) -> str:
+ return self.get_choice_name(self.__class__)
+
+ @abc.abstractmethod
+ def build(self, optimizer: Optimizer, num_training_steps: int) -> LRScheduler | None:
+ raise NotImplementedError
+
+
+@LRSchedulerConfig.register_subclass("diffuser")
+@dataclass
+class DiffuserSchedulerConfig(LRSchedulerConfig):
+ name: str = "cosine"
+ num_warmup_steps: int | None = None
+
+ def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
+ from diffusers.optimization import get_scheduler
+
+ kwargs = {**asdict(self), "num_training_steps": num_training_steps, "optimizer": optimizer}
+ return get_scheduler(**kwargs)
+
+
+@LRSchedulerConfig.register_subclass("vqbet")
+@dataclass
+class VQBeTSchedulerConfig(LRSchedulerConfig):
+ num_warmup_steps: int
+ num_vqvae_training_steps: int
+ num_cycles: float = 0.5
+
+ def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
+ def lr_lambda(current_step):
+ if current_step < self.num_vqvae_training_steps:
+ return float(1)
+ else:
+ adjusted_step = current_step - self.num_vqvae_training_steps
+ if adjusted_step < self.num_warmup_steps:
+ return float(adjusted_step) / float(max(1, self.num_warmup_steps))
+ progress = float(adjusted_step - self.num_warmup_steps) / float(
+ max(1, num_training_steps - self.num_warmup_steps)
+ )
+ return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(self.num_cycles) * 2.0 * progress)))
+
+ return LambdaLR(optimizer, lr_lambda, -1)
+
+
+@LRSchedulerConfig.register_subclass("cosine_decay_with_warmup")
+@dataclass
+class CosineDecayWithWarmupSchedulerConfig(LRSchedulerConfig):
+ """Used by Physical Intelligence to train Pi0"""
+
+ num_warmup_steps: int
+ num_decay_steps: int
+ peak_lr: float
+ decay_lr: float
+
+ def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
+ del num_training_steps
+
+ def lr_lambda(current_step):
+ def linear_warmup_schedule(current_step):
+ if current_step <= 0:
+ return 1 / (self.num_warmup_steps + 1)
+ frac = 1 - current_step / self.num_warmup_steps
+ return (1 / (self.num_warmup_steps + 1) - 1) * frac + 1
+
+ def cosine_decay_schedule(current_step):
+ step = min(current_step, self.num_decay_steps)
+ cosine_decay = 0.5 * (1 + math.cos(math.pi * step / self.num_decay_steps))
+ alpha = self.decay_lr / self.peak_lr
+ decayed = (1 - alpha) * cosine_decay + alpha
+ return decayed
+
+ if current_step < self.num_warmup_steps:
+ return linear_warmup_schedule(current_step)
+
+ return cosine_decay_schedule(current_step)
+
+ return LambdaLR(optimizer, lr_lambda, -1)
+
+
+def save_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> None:
+ state_dict = scheduler.state_dict()
+ write_json(state_dict, save_dir / SCHEDULER_STATE)
+
+
+def load_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> LRScheduler:
+ state_dict = deserialize_json_into_object(save_dir / SCHEDULER_STATE, scheduler.state_dict())
+ scheduler.load_state_dict(state_dict)
+ return scheduler
diff --git a/src/lerobot/policies/__init__.py b/src/lerobot/policies/__init__.py
new file mode 100644
index 0000000000..9cb0f62341
--- /dev/null
+++ b/src/lerobot/policies/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .act.configuration_act import ACTConfig as ACTConfig
+from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig
+from .pi0.configuration_pi0 import PI0Config as PI0Config
+from .smolvla.configuration_smolvla import SmolVLAConfig as SmolVLAConfig
+from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig
+from .vqbet.configuration_vqbet import VQBeTConfig as VQBeTConfig
diff --git a/lerobot/common/policies/act/configuration_act.py b/src/lerobot/policies/act/configuration_act.py
similarity index 86%
rename from lerobot/common/policies/act/configuration_act.py
rename to src/lerobot/policies/act/configuration_act.py
index a86c359c99..6f6c1c4be8 100644
--- a/lerobot/common/policies/act/configuration_act.py
+++ b/src/lerobot/policies/act/configuration_act.py
@@ -15,9 +15,14 @@
# limitations under the License.
from dataclasses import dataclass, field
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamWConfig
+
+@PreTrainedConfig.register_subclass("act")
@dataclass
-class ACTConfig:
+class ACTConfig(PreTrainedConfig):
"""Configuration class for the Action Chunking Transformers policy.
Defaults are configured for training on bimanual Aloha tasks like "insertion" or "transfer".
@@ -59,7 +64,7 @@ class ACTConfig:
output_normalization_modes: Similar dictionary as `normalize_input_modes`, but to unnormalize to the
original scale. Note that this is also used for normalizing the training targets.
vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
- pretrained_backbone_weights: Pretrained weights from torchvision to initalize the backbone.
+ pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
`None` means no pretrained weights.
replace_final_stride_with_dilation: Whether to replace the ResNet's final 2x2 stride with a dilated
convolution.
@@ -90,28 +95,11 @@ class ACTConfig:
chunk_size: int = 100
n_action_steps: int = 100
- input_shapes: dict[str, list[int]] = field(
+ normalization_mapping: dict[str, NormalizationMode] = field(
default_factory=lambda: {
- "observation.images.top": [3, 480, 640],
- "observation.state": [14],
- }
- )
- output_shapes: dict[str, list[int]] = field(
- default_factory=lambda: {
- "action": [14],
- }
- )
-
- # Normalization / Unnormalization
- input_normalization_modes: dict[str, str] = field(
- default_factory=lambda: {
- "observation.images.top": "mean_std",
- "observation.state": "mean_std",
- }
- )
- output_normalization_modes: dict[str, str] = field(
- default_factory=lambda: {
- "action": "mean_std",
+ "VISUAL": NormalizationMode.MEAN_STD,
+ "STATE": NormalizationMode.MEAN_STD,
+ "ACTION": NormalizationMode.MEAN_STD,
}
)
@@ -144,7 +132,14 @@ class ACTConfig:
dropout: float = 0.1
kl_weight: float = 10.0
+ # Training preset
+ optimizer_lr: float = 1e-5
+ optimizer_weight_decay: float = 1e-4
+ optimizer_lr_backbone: float = 1e-5
+
def __post_init__(self):
+ super().__post_init__()
+
"""Input validation (not exhaustive)."""
if not self.vision_backbone.startswith("resnet"):
raise ValueError(
@@ -164,8 +159,28 @@ def __post_init__(self):
raise ValueError(
f"Multiple observation steps not handled yet. Got `nobs_steps={self.n_obs_steps}`"
)
- if (
- not any(k.startswith("observation.image") for k in self.input_shapes)
- and "observation.environment_state" not in self.input_shapes
- ):
+
+ def get_optimizer_preset(self) -> AdamWConfig:
+ return AdamWConfig(
+ lr=self.optimizer_lr,
+ weight_decay=self.optimizer_weight_decay,
+ )
+
+ def get_scheduler_preset(self) -> None:
+ return None
+
+ def validate_features(self) -> None:
+ if not self.image_features and not self.env_state_feature:
raise ValueError("You must provide at least one image or the environment state among the inputs.")
+
+ @property
+ def observation_delta_indices(self) -> None:
+ return None
+
+ @property
+ def action_delta_indices(self) -> list:
+ return list(range(self.chunk_size))
+
+ @property
+ def reward_delta_indices(self) -> None:
+ return None
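For reference, a sketch (not part of the diff) of what the new preset and delta-index API evaluates to with the ACT defaults above; `AdamWConfig` is assumed to carry just the fields passed here:

    from lerobot.policies.act.configuration_act import ACTConfig

    cfg = ACTConfig()  # chunk_size=100, optimizer_lr=1e-5, optimizer_weight_decay=1e-4

    preset = cfg.get_optimizer_preset()        # AdamWConfig(lr=1e-5, weight_decay=1e-4)
    assert cfg.get_scheduler_preset() is None  # ACT ships no LR-scheduler preset

    # Delta indices tell the dataset which frames to load relative to the current step:
    # only the current observation, but a full chunk of future actions.
    assert cfg.observation_delta_indices is None
    assert cfg.action_delta_indices == list(range(100))  # [0, 1, ..., 99]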
diff --git a/lerobot/common/policies/act/modeling_act.py b/src/lerobot/policies/act/modeling_act.py
similarity index 84%
rename from lerobot/common/policies/act/modeling_act.py
rename to src/lerobot/policies/act/modeling_act.py
index 418863a143..4a048e63d3 100644
--- a/lerobot/common/policies/act/modeling_act.py
+++ b/src/lerobot/policies/act/modeling_act.py
@@ -15,46 +15,42 @@
# limitations under the License.
"""Action Chunking Transformer Policy
-As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://arxiv.org/abs/2304.13705).
+As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://huggingface.co/papers/2304.13705).
The majority of changes here involve removing unused code, unifying naming, and adding helpful comments.
"""
import math
from collections import deque
+from collections.abc import Callable
from itertools import chain
-from typing import Callable
import einops
import numpy as np
import torch
import torch.nn.functional as F # noqa: N812
import torchvision
-from huggingface_hub import PyTorchModelHubMixin
from torch import Tensor, nn
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.ops.misc import FrozenBatchNorm2d
-from lerobot.common.policies.act.configuration_act import ACTConfig
-from lerobot.common.policies.normalize import Normalize, Unnormalize
+from lerobot.constants import ACTION, OBS_IMAGES
+from lerobot.policies.act.configuration_act import ACTConfig
+from lerobot.policies.normalize import Normalize, Unnormalize
+from lerobot.policies.pretrained import PreTrainedPolicy
-class ACTPolicy(
- nn.Module,
- PyTorchModelHubMixin,
- library_name="lerobot",
- repo_url="https://github.com/huggingface/lerobot",
- tags=["robotics", "act"],
-):
+class ACTPolicy(PreTrainedPolicy):
"""
Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost
- Hardware (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act)
+ Hardware (paper: https://huggingface.co/papers/2304.13705, code: https://github.com/tonyzhaozh/act)
"""
+ config_class = ACTConfig
name = "act"
def __init__(
self,
- config: ACTConfig | None = None,
+ config: ACTConfig,
dataset_stats: dict[str, dict[str, Tensor]] | None = None,
):
"""
@@ -64,30 +60,46 @@ def __init__(
dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
that they will be passed with a call to `load_state_dict` before the policy is used.
"""
- super().__init__()
- if config is None:
- config = ACTConfig()
- self.config: ACTConfig = config
+ super().__init__(config)
+ config.validate_features()
+ self.config = config
- self.normalize_inputs = Normalize(
- config.input_shapes, config.input_normalization_modes, dataset_stats
- )
+ self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
self.normalize_targets = Normalize(
- config.output_shapes, config.output_normalization_modes, dataset_stats
+ config.output_features, config.normalization_mapping, dataset_stats
)
self.unnormalize_outputs = Unnormalize(
- config.output_shapes, config.output_normalization_modes, dataset_stats
+ config.output_features, config.normalization_mapping, dataset_stats
)
self.model = ACT(config)
- self.expected_image_keys = [k for k in config.input_shapes if k.startswith("observation.image")]
-
if config.temporal_ensemble_coeff is not None:
self.temporal_ensembler = ACTTemporalEnsembler(config.temporal_ensemble_coeff, config.chunk_size)
self.reset()
+ def get_optim_params(self) -> dict:
+ # TODO(aliberts, rcadene): As of now, lr_backbone == lr
+ # Should we remove this and just `return self.parameters()`?
+ return [
+ {
+ "params": [
+ p
+ for n, p in self.named_parameters()
+ if not n.startswith("model.backbone") and p.requires_grad
+ ]
+ },
+ {
+ "params": [
+ p
+ for n, p in self.named_parameters()
+ if n.startswith("model.backbone") and p.requires_grad
+ ],
+ "lr": self.config.optimizer_lr_backbone,
+ },
+ ]
+
def reset(self):
"""This should be called whenever the environment is reset."""
if self.config.temporal_ensemble_coeff is not None:
@@ -95,7 +107,7 @@ def reset(self):
else:
self._action_queue = deque([], maxlen=self.config.n_action_steps)
- @torch.no_grad
+ @torch.no_grad()
def select_action(self, batch: dict[str, Tensor]) -> Tensor:
"""Select a single action given environment observations.
@@ -103,45 +115,49 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
environment. It works by managing the actions in a queue and only calling `select_actions` when the
queue is empty.
"""
- self.eval()
-
- batch = self.normalize_inputs(batch)
- if len(self.expected_image_keys) > 0:
- batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
- batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4)
+ self.eval() # keeping the policy in eval mode as it could be set to train mode while queue is consumed
- # If we are doing temporal ensembling, do online updates where we keep track of the number of actions
- # we are ensembling over.
if self.config.temporal_ensemble_coeff is not None:
- actions = self.model(batch)[0] # (batch_size, chunk_size, action_dim)
- actions = self.unnormalize_outputs({"action": actions})["action"]
+ actions = self.predict_action_chunk(batch)
action = self.temporal_ensembler.update(actions)
return action
# Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
# querying the policy.
if len(self._action_queue) == 0:
- actions = self.model(batch)[0][:, : self.config.n_action_steps]
-
- # TODO(rcadene): make _forward return output dictionary?
- actions = self.unnormalize_outputs({"action": actions})["action"]
+ actions = self.predict_action_chunk(batch)[:, : self.config.n_action_steps]
# `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
# effectively has shape (n_action_steps, batch_size, *), hence the transpose.
self._action_queue.extend(actions.transpose(0, 1))
return self._action_queue.popleft()
- def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+ @torch.no_grad()
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ """Predict a chunk of actions given environment observations."""
+ self.eval()
+
+ batch = self.normalize_inputs(batch)
+ if self.config.image_features:
+ batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
+ batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features]
+
+ actions = self.model(batch)[0]
+ actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
+ return actions
+
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]:
"""Run the batch through the model and compute the loss for training or validation."""
batch = self.normalize_inputs(batch)
- if len(self.expected_image_keys) > 0:
+ if self.config.image_features:
batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
- batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4)
+ batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features]
+
batch = self.normalize_targets(batch)
actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch)
l1_loss = (
- F.l1_loss(batch["action"], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1)
+ F.l1_loss(batch[ACTION], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1)
).mean()
loss_dict = {"l1_loss": l1_loss.item()}
@@ -149,21 +165,21 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
# Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for
# each dimension independently, we sum over the latent dimension to get the total
# KL-divergence per batch element, then take the mean over the batch.
- # (See App. B of https://arxiv.org/abs/1312.6114 for more details).
+ # (See App. B of https://huggingface.co/papers/1312.6114 for more details).
mean_kld = (
(-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean()
)
loss_dict["kld_loss"] = mean_kld.item()
- loss_dict["loss"] = l1_loss + mean_kld * self.config.kl_weight
+ loss = l1_loss + mean_kld * self.config.kl_weight
else:
- loss_dict["loss"] = l1_loss
+ loss = l1_loss
- return loss_dict
+ return loss, loss_dict
class ACTTemporalEnsembler:
def __init__(self, temporal_ensemble_coeff: float, chunk_size: int) -> None:
- """Temporal ensembling as described in Algorithm 2 of https://arxiv.org/abs/2304.13705.
+ """Temporal ensembling as described in Algorithm 2 of https://huggingface.co/papers/2304.13705.
The weights are calculated as wᵢ = exp(-temporal_ensemble_coeff * i) where w₀ is the oldest action.
They are then normalized to sum to 1 by dividing by Σwᵢ. Here's some intuition around how the
@@ -200,7 +216,7 @@ def __init__(self, temporal_ensemble_coeff: float, chunk_size: int) -> None:
continue
avg *= exp_weights[:i].sum()
avg += item * exp_weights[i]
- avg /= exp_weights[:i+1].sum()
+ avg /= exp_weights[: i + 1].sum()
print("online", avg)
```
"""
@@ -288,31 +304,30 @@ class ACT(nn.Module):
"""
def __init__(self, config: ACTConfig):
- super().__init__()
- self.config = config
# BERT style VAE encoder with input tokens [cls, robot_state, *action_sequence].
# The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]).
- self.use_robot_state = "observation.state" in config.input_shapes
- self.use_images = any(k.startswith("observation.image") for k in config.input_shapes)
- self.use_env_state = "observation.environment_state" in config.input_shapes
+ super().__init__()
+ self.config = config
+
if self.config.use_vae:
self.vae_encoder = ACTEncoder(config, is_vae_encoder=True)
self.vae_encoder_cls_embed = nn.Embedding(1, config.dim_model)
# Projection layer for joint-space configuration to hidden dimension.
- if self.use_robot_state:
+ if self.config.robot_state_feature:
self.vae_encoder_robot_state_input_proj = nn.Linear(
- config.input_shapes["observation.state"][0], config.dim_model
+ self.config.robot_state_feature.shape[0], config.dim_model
)
# Projection layer for action (joint-space target) to hidden dimension.
self.vae_encoder_action_input_proj = nn.Linear(
- config.output_shapes["action"][0], config.dim_model
+ self.config.action_feature.shape[0],
+ config.dim_model,
)
# Projection layer from the VAE encoder's output to the latent distribution's parameter space.
self.vae_encoder_latent_output_proj = nn.Linear(config.dim_model, config.latent_dim * 2)
# Fixed sinusoidal positional embedding for the input to the VAE encoder. Unsqueeze for batch
# dimension.
num_input_token_encoder = 1 + config.chunk_size
- if self.use_robot_state:
+ if self.config.robot_state_feature:
num_input_token_encoder += 1
self.register_buffer(
"vae_encoder_pos_enc",
@@ -320,7 +335,7 @@ def __init__(self, config: ACTConfig):
)
# Backbone for image feature extraction.
- if self.use_images:
+ if self.config.image_features:
backbone_model = getattr(torchvision.models, config.vision_backbone)(
replace_stride_with_dilation=[False, False, config.replace_final_stride_with_dilation],
weights=config.pretrained_backbone_weights,
@@ -337,27 +352,27 @@ def __init__(self, config: ACTConfig):
# Transformer encoder input projections. The tokens will be structured like
# [latent, (robot_state), (env_state), (image_feature_map_pixels)].
- if self.use_robot_state:
+ if self.config.robot_state_feature:
self.encoder_robot_state_input_proj = nn.Linear(
- config.input_shapes["observation.state"][0], config.dim_model
+ self.config.robot_state_feature.shape[0], config.dim_model
)
- if self.use_env_state:
+ if self.config.env_state_feature:
self.encoder_env_state_input_proj = nn.Linear(
- config.input_shapes["observation.environment_state"][0], config.dim_model
+ self.config.env_state_feature.shape[0], config.dim_model
)
self.encoder_latent_input_proj = nn.Linear(config.latent_dim, config.dim_model)
- if self.use_images:
+ if self.config.image_features:
self.encoder_img_feat_input_proj = nn.Conv2d(
backbone_model.fc.in_features, config.dim_model, kernel_size=1
)
# Transformer encoder positional embeddings.
n_1d_tokens = 1 # for the latent
- if self.use_robot_state:
+ if self.config.robot_state_feature:
n_1d_tokens += 1
- if self.use_env_state:
+ if self.config.env_state_feature:
n_1d_tokens += 1
self.encoder_1d_feature_pos_embed = nn.Embedding(n_1d_tokens, config.dim_model)
- if self.use_images:
+ if self.config.image_features:
self.encoder_cam_feat_pos_embed = ACTSinusoidalPositionEmbedding2d(config.dim_model // 2)
# Transformer decoder.
@@ -365,7 +380,7 @@ def __init__(self, config: ACTConfig):
self.decoder_pos_embed = nn.Embedding(config.chunk_size, config.dim_model)
# Final action regression head on the output of the transformer's decoder.
- self.action_head = nn.Linear(config.dim_model, config.output_shapes["action"][0])
+ self.action_head = nn.Linear(config.dim_model, self.config.action_feature.shape[0])
self._reset_parameters()
@@ -380,13 +395,13 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso
`batch` should have the following structure:
{
- "observation.state" (optional): (B, state_dim) batch of robot states.
+ [robot_state_feature] (optional): (B, state_dim) batch of robot states.
- "observation.images": (B, n_cameras, C, H, W) batch of images.
+ [image_features]: (B, n_cameras, C, H, W) batch of images.
AND/OR
- "observation.environment_state": (B, env_dim) batch of environment states.
+ [env_state_feature]: (B, env_dim) batch of environment states.
- "action" (optional, only if training with VAE): (B, chunk_size, action dim) batch of actions.
+ [action_feature] (optional, only if training with VAE): (B, chunk_size, action dim) batch of actions.
}
Returns:
@@ -395,15 +410,14 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso
latent dimension.
"""
if self.config.use_vae and self.training:
- assert (
- "action" in batch
- ), "actions must be provided when using the variational objective in training mode."
+ assert "action" in batch, (
+ "actions must be provided when using the variational objective in training mode."
+ )
- batch_size = (
- batch["observation.images"]
- if "observation.images" in batch
- else batch["observation.environment_state"]
- ).shape[0]
+ if "observation.images" in batch:
+ batch_size = batch["observation.images"][0].shape[0]
+ else:
+ batch_size = batch["observation.environment_state"].shape[0]
# Prepare the latent for input to the transformer encoder.
if self.config.use_vae and "action" in batch:
@@ -411,12 +425,12 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso
cls_embed = einops.repeat(
self.vae_encoder_cls_embed.weight, "1 d -> b 1 d", b=batch_size
) # (B, 1, D)
- if self.use_robot_state:
+ if self.config.robot_state_feature:
robot_state_embed = self.vae_encoder_robot_state_input_proj(batch["observation.state"])
robot_state_embed = robot_state_embed.unsqueeze(1) # (B, 1, D)
action_embed = self.vae_encoder_action_input_proj(batch["action"]) # (B, S, D)
- if self.use_robot_state:
+ if self.config.robot_state_feature:
vae_encoder_input = [cls_embed, robot_state_embed, action_embed] # (B, S+2, D)
else:
vae_encoder_input = [cls_embed, action_embed]
@@ -430,7 +444,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso
# sequence depending whether we use the input states or not (cls and robot state)
# False means not a padding token.
cls_joint_is_pad = torch.full(
- (batch_size, 2 if self.use_robot_state else 1),
+ (batch_size, 2 if self.config.robot_state_feature else 1),
False,
device=batch["observation.state"].device,
)
@@ -463,33 +477,31 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso
encoder_in_tokens = [self.encoder_latent_input_proj(latent_sample)]
encoder_in_pos_embed = list(self.encoder_1d_feature_pos_embed.weight.unsqueeze(1))
# Robot state token.
- if self.use_robot_state:
+ if self.config.robot_state_feature:
encoder_in_tokens.append(self.encoder_robot_state_input_proj(batch["observation.state"]))
# Environment state token.
- if self.use_env_state:
+ if self.config.env_state_feature:
encoder_in_tokens.append(
self.encoder_env_state_input_proj(batch["observation.environment_state"])
)
- # Camera observation features and positional embeddings.
- if self.use_images:
- all_cam_features = []
- all_cam_pos_embeds = []
-
- for cam_index in range(batch["observation.images"].shape[-4]):
- cam_features = self.backbone(batch["observation.images"][:, cam_index])["feature_map"]
- # TODO(rcadene, alexander-soare): remove call to `.to` to speedup forward ; precompute and use
- # buffer
+ if self.config.image_features:
+ # For a list of images, the H and W may vary but H*W is constant.
+ # NOTE: If modifying this section, verify on MPS devices that
+ # gradients remain stable (no explosions or NaNs).
+ for img in batch["observation.images"]:
+ cam_features = self.backbone(img)["feature_map"]
cam_pos_embed = self.encoder_cam_feat_pos_embed(cam_features).to(dtype=cam_features.dtype)
- cam_features = self.encoder_img_feat_input_proj(cam_features) # (B, C, h, w)
- all_cam_features.append(cam_features)
- all_cam_pos_embeds.append(cam_pos_embed)
- # Concatenate camera observation feature maps and positional embeddings along the width dimension,
- # and move to (sequence, batch, dim).
- all_cam_features = torch.cat(all_cam_features, axis=-1)
- encoder_in_tokens.extend(einops.rearrange(all_cam_features, "b c h w -> (h w) b c"))
- all_cam_pos_embeds = torch.cat(all_cam_pos_embeds, axis=-1)
- encoder_in_pos_embed.extend(einops.rearrange(all_cam_pos_embeds, "b c h w -> (h w) b c"))
+ cam_features = self.encoder_img_feat_input_proj(cam_features)
+
+ # Rearrange features to (sequence, batch, dim).
+ cam_features = einops.rearrange(cam_features, "b c h w -> (h w) b c")
+ cam_pos_embed = einops.rearrange(cam_pos_embed, "b c h w -> (h w) b c")
+
+ # Extend immediately instead of accumulating and concatenating:
+ # converting to a list yields one (batch, dim) token per spatial position.
+ encoder_in_tokens.extend(list(cam_features))
+ encoder_in_pos_embed.extend(list(cam_pos_embed))
# Stack all tokens along the sequence dimension.
encoder_in_tokens = torch.stack(encoder_in_tokens, axis=0)
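A sketch (not part of the diff) of how the parameter groups returned by `get_optim_params` are typically consumed; torch optimizers accept a list of dicts where a per-group "lr" overrides the default, which is how the backbone gets its own learning rate. Here `cfg` is assumed to be an `ACTConfig` with input/output features already populated:

    import torch
    from lerobot.policies.act.modeling_act import ACTPolicy

    policy = ACTPolicy(cfg)
    optimizer = torch.optim.AdamW(
        policy.get_optim_params(),          # two groups: non-backbone params, backbone params
        lr=cfg.optimizer_lr,                # default lr for groups without their own "lr"
        weight_decay=cfg.optimizer_weight_decay,
    )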
diff --git a/lerobot/common/policies/diffusion/configuration_diffusion.py b/src/lerobot/policies/diffusion/configuration_diffusion.py
similarity index 75%
rename from lerobot/common/policies/diffusion/configuration_diffusion.py
rename to src/lerobot/policies/diffusion/configuration_diffusion.py
index 531f49e4d7..ce2de7052b 100644
--- a/lerobot/common/policies/diffusion/configuration_diffusion.py
+++ b/src/lerobot/policies/diffusion/configuration_diffusion.py
@@ -16,9 +16,15 @@
# limitations under the License.
from dataclasses import dataclass, field
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamConfig
+from lerobot.optim.schedulers import DiffuserSchedulerConfig
+
+@PreTrainedConfig.register_subclass("diffusion")
@dataclass
-class DiffusionConfig:
+class DiffusionConfig(PreTrainedConfig):
"""Configuration class for DiffusionPolicy.
Defaults are configured for training with PushT providing proprioceptive and single camera observations.
@@ -62,7 +68,7 @@ class DiffusionConfig:
within the image size. If None, no cropping is done.
crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
mode).
- pretrained_backbone_weights: Pretrained weights from torchvision to initalize the backbone.
+ pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
`None` means no pretrained weights.
use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
The group sizes are set to be about 16 (to be precise, feature_dim // 16).
@@ -75,7 +81,7 @@ class DiffusionConfig:
n_groups: Number of groups used in the group norm of the Unet's convolutional blocks.
diffusion_step_embed_dim: The Unet is conditioned on the diffusion timestep via a small non-linear
network. This is the output dimension of that network, i.e., the embedding dimension.
- use_film_scale_modulation: FiLM (https://arxiv.org/abs/1709.07871) is used for the Unet conditioning.
+ use_film_scale_modulation: FiLM (https://huggingface.co/papers/1709.07871) is used for the Unet conditioning.
Bias modulation is used by default, while this parameter indicates whether to also use scale
modulation.
noise_scheduler_type: Name of the noise scheduler to use. Supported options: ["DDPM", "DDIM"].
@@ -93,7 +99,7 @@ class DiffusionConfig:
num_inference_steps: Number of reverse diffusion steps to use at inference time (steps are evenly
spaced). If not provided, this defaults to be the same as `num_train_timesteps`.
do_mask_loss_for_padding: Whether to mask the loss when there are copy-padded actions. See
- `LeRobotDataset` and `load_previous_and_future_frames` for mor information. Note, this defaults
+ `LeRobotDataset` and `load_previous_and_future_frames` for more information. Note, this defaults
to False as the original Diffusion Policy implementation does the same.
"""
@@ -102,26 +108,17 @@ class DiffusionConfig:
horizon: int = 16
n_action_steps: int = 8
- input_shapes: dict[str, list[int]] = field(
- default_factory=lambda: {
- "observation.image": [3, 96, 96],
- "observation.state": [2],
- }
- )
- output_shapes: dict[str, list[int]] = field(
+ normalization_mapping: dict[str, NormalizationMode] = field(
default_factory=lambda: {
- "action": [2],
+ "VISUAL": NormalizationMode.MEAN_STD,
+ "STATE": NormalizationMode.MIN_MAX,
+ "ACTION": NormalizationMode.MIN_MAX,
}
)
- # Normalization / Unnormalization
- input_normalization_modes: dict[str, str] = field(
- default_factory=lambda: {
- "observation.image": "mean_std",
- "observation.state": "min_max",
- }
- )
- output_normalization_modes: dict[str, str] = field(default_factory=lambda: {"action": "min_max"})
+ # The original implementation doesn't sample frames for the last 7 steps,
+ # which avoids excessive padding and leads to improved training results.
+ drop_n_last_frames: int = 7 # horizon - n_action_steps - n_obs_steps + 1
# Architecture / modeling.
# Vision backbone.
@@ -154,39 +151,23 @@ class DiffusionConfig:
# Loss computation
do_mask_loss_for_padding: bool = False
+ # Training presets
+ optimizer_lr: float = 1e-4
+ optimizer_betas: tuple = (0.95, 0.999)
+ optimizer_eps: float = 1e-8
+ optimizer_weight_decay: float = 1e-6
+ scheduler_name: str = "cosine"
+ scheduler_warmup_steps: int = 500
+
def __post_init__(self):
+ super().__post_init__()
+
"""Input validation (not exhaustive)."""
if not self.vision_backbone.startswith("resnet"):
raise ValueError(
f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
)
- image_keys = {k for k in self.input_shapes if k.startswith("observation.image")}
-
- if len(image_keys) == 0 and "observation.environment_state" not in self.input_shapes:
- raise ValueError("You must provide at least one image or the environment state among the inputs.")
-
- if len(image_keys) > 0:
- if self.crop_shape is not None:
- for image_key in image_keys:
- if (
- self.crop_shape[0] > self.input_shapes[image_key][1]
- or self.crop_shape[1] > self.input_shapes[image_key][2]
- ):
- raise ValueError(
- f"`crop_shape` should fit within `input_shapes[{image_key}]`. Got {self.crop_shape} "
- f"for `crop_shape` and {self.input_shapes[image_key]} for "
- "`input_shapes[{image_key}]`."
- )
- # Check that all input images have the same shape.
- first_image_key = next(iter(image_keys))
- for image_key in image_keys:
- if self.input_shapes[image_key] != self.input_shapes[first_image_key]:
- raise ValueError(
- f"`input_shapes[{image_key}]` does not match `input_shapes[{first_image_key}]`, but we "
- "expect all image shapes to match."
- )
-
supported_prediction_types = ["epsilon", "sample"]
if self.prediction_type not in supported_prediction_types:
raise ValueError(
@@ -207,3 +188,50 @@ def __post_init__(self):
"The horizon should be an integer multiple of the downsampling factor (which is determined "
f"by `len(down_dims)`). Got {self.horizon=} and {self.down_dims=}"
)
+
+ def get_optimizer_preset(self) -> AdamConfig:
+ return AdamConfig(
+ lr=self.optimizer_lr,
+ betas=self.optimizer_betas,
+ eps=self.optimizer_eps,
+ weight_decay=self.optimizer_weight_decay,
+ )
+
+ def get_scheduler_preset(self) -> DiffuserSchedulerConfig:
+ return DiffuserSchedulerConfig(
+ name=self.scheduler_name,
+ num_warmup_steps=self.scheduler_warmup_steps,
+ )
+
+ def validate_features(self) -> None:
+ if len(self.image_features) == 0 and self.env_state_feature is None:
+ raise ValueError("You must provide at least one image or the environment state among the inputs.")
+
+ if self.crop_shape is not None:
+ for key, image_ft in self.image_features.items():
+ if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]:
+ raise ValueError(
+ f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} "
+ f"for `crop_shape` and {image_ft.shape} for "
+ f"`{key}`."
+ )
+
+ # Check that all input images have the same shape.
+ first_image_key, first_image_ft = next(iter(self.image_features.items()))
+ for key, image_ft in self.image_features.items():
+ if image_ft.shape != first_image_ft.shape:
+ raise ValueError(
+ f"`{key}` does not match `{first_image_key}`, but we expect all image shapes to match."
+ )
+
+ @property
+ def observation_delta_indices(self) -> list:
+ return list(range(1 - self.n_obs_steps, 1))
+
+ @property
+ def action_delta_indices(self) -> list:
+ return list(range(1 - self.n_obs_steps, 1 - self.n_obs_steps + self.horizon))
+
+ @property
+ def reward_delta_indices(self) -> None:
+ return None
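A worked sketch (not part of the diff) of the delta-index properties above, assuming the default `n_obs_steps=2` together with the `horizon=16` and `n_action_steps=8` defaults shown in this file:

    n_obs_steps, horizon, n_action_steps = 2, 16, 8

    observation_delta_indices = list(range(1 - n_obs_steps, 1))
    # -> [-1, 0]: the previous and the current observation frame.

    action_delta_indices = list(range(1 - n_obs_steps, 1 - n_obs_steps + horizon))
    # -> [-1, 0, 1, ..., 14]: a horizon of 16 actions aligned with the first observation.

    assert horizon - n_action_steps - n_obs_steps + 1 == 7  # matches drop_n_last_frames above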
diff --git a/lerobot/common/policies/diffusion/modeling_diffusion.py b/src/lerobot/policies/diffusion/modeling_diffusion.py
similarity index 87%
rename from lerobot/common/policies/diffusion/modeling_diffusion.py
rename to src/lerobot/policies/diffusion/modeling_diffusion.py
index 9ba5626007..24b273967e 100644
--- a/lerobot/common/policies/diffusion/modeling_diffusion.py
+++ b/src/lerobot/policies/diffusion/modeling_diffusion.py
@@ -22,7 +22,7 @@
import math
from collections import deque
-from typing import Callable
+from collections.abc import Callable
import einops
import numpy as np
@@ -31,35 +31,32 @@
import torchvision
from diffusers.schedulers.scheduling_ddim import DDIMScheduler
from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
-from huggingface_hub import PyTorchModelHubMixin
from torch import Tensor, nn
-from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
-from lerobot.common.policies.normalize import Normalize, Unnormalize
-from lerobot.common.policies.utils import (
+from lerobot.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE
+from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
+from lerobot.policies.normalize import Normalize, Unnormalize
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.utils import (
get_device_from_parameters,
get_dtype_from_parameters,
+ get_output_shape,
populate_queues,
)
-class DiffusionPolicy(
- nn.Module,
- PyTorchModelHubMixin,
- library_name="lerobot",
- repo_url="https://github.com/huggingface/lerobot",
- tags=["robotics", "diffusion-policy"],
-):
+class DiffusionPolicy(PreTrainedPolicy):
"""
Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion"
- (paper: https://arxiv.org/abs/2303.04137, code: https://github.com/real-stanford/diffusion_policy).
+ (paper: https://huggingface.co/papers/2303.04137, code: https://github.com/real-stanford/diffusion_policy).
"""
+ config_class = DiffusionConfig
name = "diffusion"
def __init__(
self,
- config: DiffusionConfig | None = None,
+ config: DiffusionConfig,
dataset_stats: dict[str, dict[str, Tensor]] | None = None,
):
"""
@@ -69,18 +66,16 @@ def __init__(
dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
that they will be passed with a call to `load_state_dict` before the policy is used.
"""
- super().__init__()
- if config is None:
- config = DiffusionConfig()
+ super().__init__(config)
+ config.validate_features()
self.config = config
- self.normalize_inputs = Normalize(
- config.input_shapes, config.input_normalization_modes, dataset_stats
- )
+
+ self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
self.normalize_targets = Normalize(
- config.output_shapes, config.output_normalization_modes, dataset_stats
+ config.output_features, config.normalization_mapping, dataset_stats
)
self.unnormalize_outputs = Unnormalize(
- config.output_shapes, config.output_normalization_modes, dataset_stats
+ config.output_features, config.normalization_mapping, dataset_stats
)
# queues are populated during rollout of the policy, they contain the n latest observations and actions
@@ -88,23 +83,35 @@ def __init__(
self.diffusion = DiffusionModel(config)
- self.expected_image_keys = [k for k in config.input_shapes if k.startswith("observation.image")]
- self.use_env_state = "observation.environment_state" in config.input_shapes
-
self.reset()
+ def get_optim_params(self) -> dict:
+ return self.diffusion.parameters()
+
def reset(self):
"""Clear observation and action queues. Should be called on `env.reset()`"""
self._queues = {
"observation.state": deque(maxlen=self.config.n_obs_steps),
"action": deque(maxlen=self.config.n_action_steps),
}
- if len(self.expected_image_keys) > 0:
+ if self.config.image_features:
self._queues["observation.images"] = deque(maxlen=self.config.n_obs_steps)
- if self.use_env_state:
+ if self.config.env_state_feature:
self._queues["observation.environment_state"] = deque(maxlen=self.config.n_obs_steps)
- @torch.no_grad
+ @torch.no_grad()
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ """Predict a chunk of actions given environment observations."""
+ # stack n latest observations from the queue
+ batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
+ actions = self.diffusion.generate_actions(batch)
+
+ # TODO(rcadene): make above methods return output dictionary?
+ actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
+
+ return actions
+
+ @torch.no_grad()
def select_action(self, batch: dict[str, Tensor]) -> Tensor:
"""Select a single action given environment observations.
@@ -127,34 +134,29 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
actually measured from the first observation which (if `n_obs_steps` > 1) happened in the past.
"""
batch = self.normalize_inputs(batch)
- if len(self.expected_image_keys) > 0:
+ if self.config.image_features:
batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
- batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4)
+ batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
# Note: It's important that this happens after stacking the images into a single key.
self._queues = populate_queues(self._queues, batch)
- if len(self._queues["action"]) == 0:
- # stack n latest observations from the queue
- batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
- actions = self.diffusion.generate_actions(batch)
-
- # TODO(rcadene): make above methods return output dictionary?
- actions = self.unnormalize_outputs({"action": actions})["action"]
+ if len(self._queues[ACTION]) == 0:
+ actions = self.predict_action_chunk(batch)
+ self._queues[ACTION].extend(actions.transpose(0, 1))
- self._queues["action"].extend(actions.transpose(0, 1))
-
- action = self._queues["action"].popleft()
+ action = self._queues[ACTION].popleft()
return action
- def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, None]:
"""Run the batch through the model and compute the loss for training or validation."""
batch = self.normalize_inputs(batch)
- if len(self.expected_image_keys) > 0:
+ if self.config.image_features:
batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
- batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4)
+ batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
batch = self.normalize_targets(batch)
loss = self.diffusion.compute_loss(batch)
- return {"loss": loss}
+ # no output_dict so returning None
+ return loss, None
def _make_noise_scheduler(name: str, **kwargs: dict) -> DDPMScheduler | DDIMScheduler:
@@ -176,12 +178,9 @@ def __init__(self, config: DiffusionConfig):
self.config = config
# Build observation encoders (depending on which observations are provided).
- global_cond_dim = config.input_shapes["observation.state"][0]
- num_images = len([k for k in config.input_shapes if k.startswith("observation.image")])
- self._use_images = False
- self._use_env_state = False
- if num_images > 0:
- self._use_images = True
+ global_cond_dim = self.config.robot_state_feature.shape[0]
+ if self.config.image_features:
+ num_images = len(self.config.image_features)
if self.config.use_separate_rgb_encoder_per_camera:
encoders = [DiffusionRgbEncoder(config) for _ in range(num_images)]
self.rgb_encoder = nn.ModuleList(encoders)
@@ -189,9 +188,8 @@ def __init__(self, config: DiffusionConfig):
else:
self.rgb_encoder = DiffusionRgbEncoder(config)
global_cond_dim += self.rgb_encoder.feature_dim * num_images
- if "observation.environment_state" in config.input_shapes:
- self._use_env_state = True
- global_cond_dim += config.input_shapes["observation.environment_state"][0]
+ if self.config.env_state_feature:
+ global_cond_dim += self.config.env_state_feature.shape[0]
self.unet = DiffusionConditionalUnet1d(config, global_cond_dim=global_cond_dim * config.n_obs_steps)
@@ -220,7 +218,7 @@ def conditional_sample(
# Sample prior.
sample = torch.randn(
- size=(batch_size, self.config.horizon, self.config.output_shapes["action"][0]),
+ size=(batch_size, self.config.horizon, self.config.action_feature.shape[0]),
dtype=dtype,
device=device,
generator=generator,
@@ -242,10 +240,10 @@ def conditional_sample(
def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor:
"""Encode image features and concatenate them all together along with the state vector."""
- batch_size, n_obs_steps = batch["observation.state"].shape[:2]
- global_cond_feats = [batch["observation.state"]]
+ batch_size, n_obs_steps = batch[OBS_STATE].shape[:2]
+ global_cond_feats = [batch[OBS_STATE]]
# Extract image features.
- if self._use_images:
+ if self.config.image_features:
if self.config.use_separate_rgb_encoder_per_camera:
# Combine batch and sequence dims while rearranging to make the camera index dimension first.
images_per_camera = einops.rearrange(batch["observation.images"], "b s n ... -> n (b s) ...")
@@ -272,8 +270,8 @@ def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor:
)
global_cond_feats.append(img_features)
- if self._use_env_state:
- global_cond_feats.append(batch["observation.environment_state"])
+ if self.config.env_state_feature:
+ global_cond_feats.append(batch[OBS_ENV_STATE])
# Concatenate features then flatten to (B, global_cond_dim).
return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1)
@@ -374,7 +372,7 @@ def compute_loss(self, batch: dict[str, Tensor]) -> Tensor:
class SpatialSoftmax(nn.Module):
"""
Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al.
- (https://arxiv.org/pdf/1509.06113). A minimal port of the robomimic implementation.
+ (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation.
At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass"
of activations of each channel, i.e., keypoints in the image space for the policy to focus on.
@@ -443,7 +441,7 @@ def forward(self, features: Tensor) -> Tensor:
class DiffusionRgbEncoder(nn.Module):
- """Encoder an RGB image into a 1D feature vector.
+ """Encodes an RGB image into a 1D feature vector.
Includes the ability to normalize and crop the image first.
"""
@@ -482,19 +480,16 @@ def __init__(self, config: DiffusionConfig):
# Set up pooling and final layers.
# Use a dry run to get the feature map shape.
- # The dummy input should take the number of image channels from `config.input_shapes` and it should
+ # The dummy input should take the number of image channels from `config.image_features` and it should
# use the height and width from `config.crop_shape` if it is provided, otherwise it should use the
- # height and width from `config.input_shapes`.
- image_keys = [k for k in config.input_shapes if k.startswith("observation.image")]
+ # height and width from `config.image_features`.
+
# Note: we have a check in the config class to make sure all images have the same shape.
- image_key = image_keys[0]
- dummy_input_h_w = (
- config.crop_shape if config.crop_shape is not None else config.input_shapes[image_key][1:]
- )
- dummy_input = torch.zeros(size=(1, config.input_shapes[image_key][0], *dummy_input_h_w))
- with torch.inference_mode():
- dummy_feature_map = self.backbone(dummy_input)
- feature_map_shape = tuple(dummy_feature_map.shape[1:])
+ images_shape = next(iter(config.image_features.values())).shape
+ dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:]
+ dummy_shape = (1, images_shape[0], *dummy_shape_h_w)
+ feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:]
+
self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints)
self.feature_dim = config.spatial_softmax_num_keypoints * 2
self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim)
@@ -611,7 +606,7 @@ def __init__(self, config: DiffusionConfig, global_cond_dim: int):
# In channels / out channels for each downsampling block in the Unet's encoder. For the decoder, we
# just reverse these.
- in_out = [(config.output_shapes["action"][0], config.down_dims[0])] + list(
+ in_out = [(config.action_feature.shape[0], config.down_dims[0])] + list(
zip(config.down_dims[:-1], config.down_dims[1:], strict=True)
)
@@ -666,7 +661,7 @@ def __init__(self, config: DiffusionConfig, global_cond_dim: int):
self.final_conv = nn.Sequential(
DiffusionConv1dBlock(config.down_dims[0], config.down_dims[0], kernel_size=config.kernel_size),
- nn.Conv1d(config.down_dims[0], config.output_shapes["action"][0], 1),
+ nn.Conv1d(config.down_dims[0], config.action_feature.shape[0], 1),
)
def forward(self, x: Tensor, timestep: Tensor | int, global_cond=None) -> Tensor:
@@ -735,7 +730,7 @@ def __init__(
self.conv1 = DiffusionConv1dBlock(in_channels, out_channels, kernel_size, n_groups=n_groups)
- # FiLM modulation (https://arxiv.org/abs/1709.07871) outputs per-channel bias and (maybe) scale.
+ # FiLM modulation (https://huggingface.co/papers/1709.07871) outputs per-channel bias and (maybe) scale.
cond_channels = out_channels * 2 if use_film_scale_modulation else out_channels
self.cond_encoder = nn.Sequential(nn.Mish(), nn.Linear(cond_dim, cond_channels))
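A sketch (not part of the diff) of the queue-based rollout loop these methods support; `cfg`, `dataset_stats`, `env`, and the `prepare_observation` helper are assumptions/hypothetical names, not APIs from this diff:

    from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy

    policy = DiffusionPolicy(cfg, dataset_stats=dataset_stats)
    policy.reset()  # clears observation/action queues; call on env.reset()

    obs, _ = env.reset()
    done = False
    while not done:
        batch = prepare_observation(obs)      # hypothetical: dict of tensors keyed like the dataset
        action = policy.select_action(batch)  # pops one action; refills the queue when it is empty
        obs, reward, terminated, truncated, _ = env.step(action.squeeze(0).cpu().numpy())
        done = terminated or truncated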
diff --git a/src/lerobot/policies/factory.py b/src/lerobot/policies/factory.py
new file mode 100644
index 0000000000..ef56bdb613
--- /dev/null
+++ b/src/lerobot/policies/factory.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from torch import nn
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import FeatureType
+from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
+from lerobot.datasets.utils import dataset_to_policy_features
+from lerobot.envs.configs import EnvConfig
+from lerobot.envs.utils import env_to_policy_features
+from lerobot.policies.act.configuration_act import ACTConfig
+from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
+from lerobot.policies.pi0.configuration_pi0 import PI0Config
+from lerobot.policies.pi0fast.configuration_pi0fast import PI0FASTConfig
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.sac.configuration_sac import SACConfig
+from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
+from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig
+from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
+from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
+
+
+def get_policy_class(name: str) -> PreTrainedPolicy:
+ """Get the policy's class and config class given a name (matching the policy class' `name` attribute)."""
+ if name == "tdmpc":
+ from lerobot.policies.tdmpc.modeling_tdmpc import TDMPCPolicy
+
+ return TDMPCPolicy
+ elif name == "diffusion":
+ from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
+
+ return DiffusionPolicy
+ elif name == "act":
+ from lerobot.policies.act.modeling_act import ACTPolicy
+
+ return ACTPolicy
+ elif name == "vqbet":
+ from lerobot.policies.vqbet.modeling_vqbet import VQBeTPolicy
+
+ return VQBeTPolicy
+ elif name == "pi0":
+ from lerobot.policies.pi0.modeling_pi0 import PI0Policy
+
+ return PI0Policy
+ elif name == "pi0fast":
+ from lerobot.policies.pi0fast.modeling_pi0fast import PI0FASTPolicy
+
+ return PI0FASTPolicy
+ elif name == "sac":
+ from lerobot.policies.sac.modeling_sac import SACPolicy
+
+ return SACPolicy
+ elif name == "reward_classifier":
+ from lerobot.policies.sac.reward_model.modeling_classifier import Classifier
+
+ return Classifier
+ elif name == "smolvla":
+ from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy
+
+ return SmolVLAPolicy
+ else:
+ raise NotImplementedError(f"Policy with name {name} is not implemented.")
+
+
+def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig:
+ if policy_type == "tdmpc":
+ return TDMPCConfig(**kwargs)
+ elif policy_type == "diffusion":
+ return DiffusionConfig(**kwargs)
+ elif policy_type == "act":
+ return ACTConfig(**kwargs)
+ elif policy_type == "vqbet":
+ return VQBeTConfig(**kwargs)
+ elif policy_type == "pi0":
+ return PI0Config(**kwargs)
+ elif policy_type == "pi0fast":
+ return PI0FASTConfig(**kwargs)
+ elif policy_type == "sac":
+ return SACConfig(**kwargs)
+ elif policy_type == "smolvla":
+ return SmolVLAConfig(**kwargs)
+ elif policy_type == "reward_classifier":
+ return RewardClassifierConfig(**kwargs)
+ else:
+ raise ValueError(f"Policy type '{policy_type}' is not available.")
+
+
+def make_policy(
+ cfg: PreTrainedConfig,
+ ds_meta: LeRobotDatasetMetadata | None = None,
+ env_cfg: EnvConfig | None = None,
+) -> PreTrainedPolicy:
+ """Make an instance of a policy class.
+
+ This function exists because (for now) we need to parse features from either a dataset or an environment
+ in order to properly dimension and instantiate a policy for that dataset or environment.
+
+ Args:
+ cfg (PreTrainedConfig): The config of the policy to make. If `pretrained_path` is set, the policy will
+ be loaded with the weights from that path.
+ ds_meta (LeRobotDatasetMetadata | None, optional): Dataset metadata to take input/output shapes and
+ statistics to use for (un)normalization of inputs/outputs in the policy. Defaults to None.
+ env_cfg (EnvConfig | None, optional): The config of a gym environment to parse features from. Must be
+ provided if ds_meta is not. Defaults to None.
+
+ Raises:
+ ValueError: If neither or both of `ds_meta` and `env_cfg` are provided.
+ NotImplementedError: If the policy type is 'vqbet' and the policy device is 'mps' (due to an incompatibility).
+
+ Returns:
+ PreTrainedPolicy: The instantiated policy, loaded from `pretrained_path` if set and moved to `cfg.device`.
+ """
+ if bool(ds_meta) == bool(env_cfg):
+ raise ValueError("Either one of a dataset metadata or a sim env must be provided.")
+
+ # NOTE: Currently, if you try to run vqbet with mps backend, you'll get this error.
+ # TODO(aliberts, rcadene): Implement a check_backend_compatibility in policies?
+ # NotImplementedError: The operator 'aten::unique_dim' is not currently implemented for the MPS device. If
+ # you want this op to be added in priority during the prototype phase of this feature, please comment on
+ # https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment
+ # variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be
+ # slower than running natively on MPS.
+ if cfg.type == "vqbet" and cfg.device == "mps":
+ raise NotImplementedError(
+ "Current implementation of VQBeT does not support `mps` backend. "
+ "Please use `cpu` or `cuda` backend."
+ )
+
+ policy_cls = get_policy_class(cfg.type)
+
+ kwargs = {}
+ if ds_meta is not None:
+ features = dataset_to_policy_features(ds_meta.features)
+ kwargs["dataset_stats"] = ds_meta.stats
+ else:
+ if not cfg.pretrained_path:
+ logging.warning(
+ "You are instantiating a policy from scratch and its features are parsed from an environment "
+ "rather than a dataset. Normalization modules inside the policy will have infinite values "
+ "by default without stats from a dataset."
+ )
+ features = env_to_policy_features(env_cfg)
+
+ cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
+ cfg.input_features = {key: ft for key, ft in features.items() if key not in cfg.output_features}
+ kwargs["config"] = cfg
+
+ if cfg.pretrained_path:
+ # Load a pretrained policy and override the config if needed (for example, if there are inference-time
+ # hyperparameters that we want to vary).
+ kwargs["pretrained_name_or_path"] = cfg.pretrained_path
+ policy = policy_cls.from_pretrained(**kwargs)
+ else:
+ # Make a fresh policy.
+ policy = policy_cls(**kwargs)
+
+ policy.to(cfg.device)
+ assert isinstance(policy, nn.Module)
+
+ # policy = torch.compile(policy, mode="reduce-overhead")
+
+ return policy
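A usage sketch (not part of the diff) of the factory with dataset metadata; the repo id is illustrative and `device` is assumed to be a field inherited from `PreTrainedConfig`:

    from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
    from lerobot.policies.factory import make_policy, make_policy_config

    ds_meta = LeRobotDatasetMetadata("lerobot/pusht")      # illustrative repo id
    cfg = make_policy_config("diffusion", device="cuda")
    policy = make_policy(cfg, ds_meta=ds_meta)             # features and stats are parsed from ds_meta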
diff --git a/src/lerobot/policies/normalize.py b/src/lerobot/policies/normalize.py
new file mode 100644
index 0000000000..119055873c
--- /dev/null
+++ b/src/lerobot/policies/normalize.py
@@ -0,0 +1,420 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+import torch
+from torch import Tensor, nn
+
+from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
+
+
+def create_stats_buffers(
+ features: dict[str, PolicyFeature],
+ norm_map: dict[str, NormalizationMode],
+ stats: dict[str, dict[str, Tensor]] | None = None,
+) -> dict[str, dict[str, nn.ParameterDict]]:
+ """
+ Create buffers per modality (e.g. "observation.image", "action") containing their mean, std, min, max
+ statistics.
+
+ Args: (see Normalize and Unnormalize)
+
+ Returns:
+ dict: A dictionary where keys are modalities and values are `nn.ParameterDict` containing
+ `nn.Parameters` set to `requires_grad=False`, suitable to not be updated during backpropagation.
+ """
+ stats_buffers = {}
+
+ for key, ft in features.items():
+ norm_mode = norm_map.get(ft.type, NormalizationMode.IDENTITY)
+ if norm_mode is NormalizationMode.IDENTITY:
+ continue
+
+ assert isinstance(norm_mode, NormalizationMode)
+
+ shape = tuple(ft.shape)
+
+ if ft.type is FeatureType.VISUAL:
+ # sanity checks
+ assert len(shape) == 3, f"number of dimensions of {key} != 3 ({shape=})"
+ c, h, w = shape
+ assert c < h and c < w, f"{key} is not channel first ({shape=})"
+ # override image shape to be invariant to height and width
+ shape = (c, 1, 1)
+
+ # Note: we initialize mean, std, min, max to infinity. They should be overwritten
+ # downstream by `stats` or `policy.load_state_dict`, as expected. During forward,
+ # we assert they are not infinity anymore.
+
+ buffer = {}
+ if norm_mode is NormalizationMode.MEAN_STD:
+ mean = torch.ones(shape, dtype=torch.float32) * torch.inf
+ std = torch.ones(shape, dtype=torch.float32) * torch.inf
+ buffer = nn.ParameterDict(
+ {
+ "mean": nn.Parameter(mean, requires_grad=False),
+ "std": nn.Parameter(std, requires_grad=False),
+ }
+ )
+ elif norm_mode is NormalizationMode.MIN_MAX:
+ min = torch.ones(shape, dtype=torch.float32) * torch.inf
+ max = torch.ones(shape, dtype=torch.float32) * torch.inf
+ buffer = nn.ParameterDict(
+ {
+ "min": nn.Parameter(min, requires_grad=False),
+ "max": nn.Parameter(max, requires_grad=False),
+ }
+ )
+
+ # TODO(aliberts, rcadene): harmonize this to only use one framework (np or torch)
+ if stats:
+ if isinstance(stats[key]["mean"], np.ndarray):
+ if norm_mode is NormalizationMode.MEAN_STD:
+ buffer["mean"].data = torch.from_numpy(stats[key]["mean"]).to(dtype=torch.float32)
+ buffer["std"].data = torch.from_numpy(stats[key]["std"]).to(dtype=torch.float32)
+ elif norm_mode is NormalizationMode.MIN_MAX:
+ buffer["min"].data = torch.from_numpy(stats[key]["min"]).to(dtype=torch.float32)
+ buffer["max"].data = torch.from_numpy(stats[key]["max"]).to(dtype=torch.float32)
+ elif isinstance(stats[key]["mean"], torch.Tensor):
+ # Note: The clone is needed to make sure that the logic in save_pretrained doesn't see duplicated
+ # tensors anywhere (for example, when we use the same stats for normalization and
+ # unnormalization). See the logic here
+ # https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L97.
+ if norm_mode is NormalizationMode.MEAN_STD:
+ buffer["mean"].data = stats[key]["mean"].clone().to(dtype=torch.float32)
+ buffer["std"].data = stats[key]["std"].clone().to(dtype=torch.float32)
+ elif norm_mode is NormalizationMode.MIN_MAX:
+ buffer["min"].data = stats[key]["min"].clone().to(dtype=torch.float32)
+ buffer["max"].data = stats[key]["max"].clone().to(dtype=torch.float32)
+ else:
+ type_ = type(stats[key]["mean"])
+ raise ValueError(f"np.ndarray or torch.Tensor expected, but type is '{type_}' instead.")
+
+ stats_buffers[key] = buffer
+ return stats_buffers
+
+
+def _no_stats_error_str(name: str) -> str:
+ return (
+ f"`{name}` is infinity. You should either initialize with `stats` as an argument, or use a "
+ "pretrained model."
+ )
+
+
+class Normalize(nn.Module):
+ """Normalizes data (e.g. "observation.image") for more stable and faster convergence during training."""
+
+ def __init__(
+ self,
+ features: dict[str, PolicyFeature],
+ norm_map: dict[str, NormalizationMode],
+ stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ """
+ Args:
+ features (dict): A dictionary where keys are input modalities (e.g. "observation.image") and values
+ are their `PolicyFeature` (with shapes such as `[3, 96, 96]`). These shapes are used to create the tensor buffer containing
+ mean, std, min, max statistics. For image features, the shape
+ is adjusted to be invariant to height and width, assuming a channel-first (c, h, w) format.
+ norm_map (dict): A dictionary where keys are feature types (e.g. "VISUAL", "STATE") and values
+ are their normalization modes among:
+ - "mean_std": subtract the mean and divide by standard deviation.
+ - "min_max": map to [-1, 1] range.
+ stats (dict, optional): A dictionary where keys are output modalities (e.g. "observation.image")
+ and values are dictionaries of statistic types and their values (e.g.
+ `{"mean": torch.randn(3,1,1)}, "std": torch.randn(3,1,1)}`). If provided, as expected for
+ training the model for the first time, these statistics will overwrite the default buffers. If
+ not provided, as expected for finetuning or evaluation, the default buffers should be
+ overwritten by a call to `policy.load_state_dict(state_dict)`. That way, initializing the
+ dataset is not needed to get the stats, since they are already in the policy state_dict.
+ """
+ super().__init__()
+ self.features = features
+ self.norm_map = norm_map
+ self.stats = stats
+ stats_buffers = create_stats_buffers(features, norm_map, stats)
+ for key, buffer in stats_buffers.items():
+ setattr(self, "buffer_" + key.replace(".", "_"), buffer)
+
+ # TODO(rcadene): should we remove torch.no_grad?
+ @torch.no_grad()
+ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+ # TODO: Remove this shallow copy
+ batch = dict(batch) # shallow copy avoids mutating the input batch
+ for key, ft in self.features.items():
+ if key not in batch:
+ # FIXME(aliberts, rcadene): This might lead to silent fail!
+ continue
+
+ norm_mode = self.norm_map.get(ft.type, NormalizationMode.IDENTITY)
+ if norm_mode is NormalizationMode.IDENTITY:
+ continue
+
+ buffer = getattr(self, "buffer_" + key.replace(".", "_"))
+
+ if norm_mode is NormalizationMode.MEAN_STD:
+ mean = buffer["mean"]
+ std = buffer["std"]
+ assert not torch.isinf(mean).any(), _no_stats_error_str("mean")
+ assert not torch.isinf(std).any(), _no_stats_error_str("std")
+ batch[key] = (batch[key] - mean) / (std + 1e-8)
+ elif norm_mode is NormalizationMode.MIN_MAX:
+ min = buffer["min"]
+ max = buffer["max"]
+ assert not torch.isinf(min).any(), _no_stats_error_str("min")
+ assert not torch.isinf(max).any(), _no_stats_error_str("max")
+ # normalize to [0,1]
+ batch[key] = (batch[key] - min) / (max - min + 1e-8)
+ # normalize to [-1, 1]
+ batch[key] = batch[key] * 2 - 1
+ else:
+ raise ValueError(norm_mode)
+ return batch
+
+
+class Unnormalize(nn.Module):
+ """
+ Similar to `Normalize` but unnormalizes output data (e.g. `{"action": torch.randn(b,c)}`) in their
+ original range used by the environment.
+ """
+
+ def __init__(
+ self,
+ features: dict[str, PolicyFeature],
+ norm_map: dict[str, NormalizationMode],
+ stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ """
+ Args:
+ features (dict): A dictionary where keys are input modalities (e.g. "observation.image") and values
+ are their `PolicyFeature` (with shapes such as `[3, 96, 96]`). These shapes are used to create the tensor buffer containing
+ mean, std, min, max statistics. For image features, the shape
+ is adjusted to be invariant to height and width, assuming a channel-first (c, h, w) format.
+ norm_map (dict): A dictionary where keys are feature types (e.g. "VISUAL", "STATE") and values
+ are their normalization modes among:
+ - "mean_std": subtract the mean and divide by standard deviation.
+ - "min_max": map to [-1, 1] range.
+ stats (dict, optional): A dictionary where keys are output modalities (e.g. "observation.image")
+ and values are dictionaries of statistic types and their values (e.g.
+ `{"mean": torch.randn(3,1,1), "std": torch.randn(3,1,1)}`). If provided, as expected for
+ training the model for the first time, these statistics will overwrite the default buffers. If
+ not provided, as expected for finetuning or evaluation, the default buffers should be
+ overwritten by a call to `policy.load_state_dict(state_dict)`. That way, initializing the
+ dataset is not needed to get the stats, since they are already in the policy state_dict.
+ """
+ super().__init__()
+ self.features = features
+ self.norm_map = norm_map
+ self.stats = stats
+ # `self.buffer_observation_state["mean"]` contains `torch.tensor(state_dim)`
+ stats_buffers = create_stats_buffers(features, norm_map, stats)
+ for key, buffer in stats_buffers.items():
+ setattr(self, "buffer_" + key.replace(".", "_"), buffer)
+
+ # TODO(rcadene): should we remove torch.no_grad?
+ @torch.no_grad()
+ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+ batch = dict(batch) # shallow copy avoids mutating the input batch
+ for key, ft in self.features.items():
+ if key not in batch:
+ continue
+
+ norm_mode = self.norm_map.get(ft.type, NormalizationMode.IDENTITY)
+ if norm_mode is NormalizationMode.IDENTITY:
+ continue
+
+ buffer = getattr(self, "buffer_" + key.replace(".", "_"))
+
+ if norm_mode is NormalizationMode.MEAN_STD:
+ mean = buffer["mean"]
+ std = buffer["std"]
+ assert not torch.isinf(mean).any(), _no_stats_error_str("mean")
+ assert not torch.isinf(std).any(), _no_stats_error_str("std")
+ batch[key] = batch[key] * std + mean
+ elif norm_mode is NormalizationMode.MIN_MAX:
+ min = buffer["min"]
+ max = buffer["max"]
+ assert not torch.isinf(min).any(), _no_stats_error_str("min")
+ assert not torch.isinf(max).any(), _no_stats_error_str("max")
+ batch[key] = (batch[key] + 1) / 2
+ batch[key] = batch[key] * (max - min) + min
+ else:
+ raise ValueError(norm_mode)
+ return batch
+
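+# Illustrative usage sketch (not part of the original code): the feature key, shapes and
+# statistics below are hypothetical placeholders. With MEAN_STD stats, `Normalize` and
+# `Unnormalize` are inverses of each other up to the 1e-8 epsilon:
+#
+#   features = {"observation.state": PolicyFeature(type=FeatureType.STATE, shape=(2,))}
+#   norm_map = {FeatureType.STATE: NormalizationMode.MEAN_STD}
+#   stats = {"observation.state": {"mean": torch.zeros(2), "std": torch.ones(2)}}
+#   normalize = Normalize(features, norm_map, stats)
+#   unnormalize = Unnormalize(features, norm_map, stats)
+#   batch = {"observation.state": torch.randn(4, 2)}
+#   roundtrip = unnormalize(normalize(batch))["observation.state"]  # ~= batch values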
+
+# TODO (azouitine): We should replace all normalization on the policies with register_buffer normalization
+# and remove the `Normalize` and `Unnormalize` classes.
+def _initialize_stats_buffers(
+ module: nn.Module,
+ features: dict[str, PolicyFeature],
+ norm_map: dict[str, NormalizationMode],
+ stats: dict[str, dict[str, Tensor]] | None = None,
+) -> None:
+ """Register statistics buffers (mean/std or min/max) on the given *module*.
+
+ The logic matches the previous constructors of `NormalizeBuffer` and `UnnormalizeBuffer`,
+ but is factored out so it can be reused by both classes and stay in sync.
+ """
+ for key, ft in features.items():
+ norm_mode = norm_map.get(ft.type, NormalizationMode.IDENTITY)
+ if norm_mode is NormalizationMode.IDENTITY:
+ continue
+
+ shape: tuple[int, ...] = tuple(ft.shape)
+ if ft.type is FeatureType.VISUAL:
+ # reduce spatial dimensions, keep channel dimension only
+ c, *_ = shape
+ shape = (c, 1, 1)
+
+ prefix = key.replace(".", "_")
+
+ if norm_mode is NormalizationMode.MEAN_STD:
+ mean = torch.full(shape, torch.inf, dtype=torch.float32)
+ std = torch.full(shape, torch.inf, dtype=torch.float32)
+
+ if stats and key in stats and "mean" in stats[key] and "std" in stats[key]:
+ mean_data = stats[key]["mean"]
+ std_data = stats[key]["std"]
+ if isinstance(mean_data, torch.Tensor):
+ # Note: The clone is needed to make sure that the logic in save_pretrained doesn't see duplicated
+ # tensors anywhere (for example, when we use the same stats for normalization and
+ # unnormalization). See the logic here
+ # https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L97.
+ mean = mean_data.clone().to(dtype=torch.float32)
+ std = std_data.clone().to(dtype=torch.float32)
+ else:
+                    raise ValueError(f"Unsupported stats type for key '{key}' (expected torch.Tensor).")
+
+ module.register_buffer(f"{prefix}_mean", mean)
+ module.register_buffer(f"{prefix}_std", std)
+ continue
+
+ if norm_mode is NormalizationMode.MIN_MAX:
+ min_val = torch.full(shape, torch.inf, dtype=torch.float32)
+ max_val = torch.full(shape, torch.inf, dtype=torch.float32)
+
+ if stats and key in stats and "min" in stats[key] and "max" in stats[key]:
+ min_data = stats[key]["min"]
+ max_data = stats[key]["max"]
+ if isinstance(min_data, torch.Tensor):
+ min_val = min_data.clone().to(dtype=torch.float32)
+ max_val = max_data.clone().to(dtype=torch.float32)
+ else:
+                    raise ValueError(f"Unsupported stats type for key '{key}' (expected torch.Tensor).")
+
+ module.register_buffer(f"{prefix}_min", min_val)
+ module.register_buffer(f"{prefix}_max", max_val)
+ continue
+
+ raise ValueError(norm_mode)
+
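+# Buffer naming sketch (hypothetical feature key): for a STATE feature stored under
+# "observation.state" with MEAN_STD, the helper above registers buffers named
+# "observation_state_mean" and "observation_state_std", which therefore appear directly
+# in `module.state_dict()` and in saved checkpoints.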
+
+class NormalizeBuffer(nn.Module):
+ """Same as `Normalize` but statistics are stored as registered buffers rather than parameters."""
+
+ def __init__(
+ self,
+ features: dict[str, PolicyFeature],
+ norm_map: dict[str, NormalizationMode],
+ stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ super().__init__()
+ self.features = features
+ self.norm_map = norm_map
+
+ _initialize_stats_buffers(self, features, norm_map, stats)
+
+ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+ batch = dict(batch)
+ for key, ft in self.features.items():
+ if key not in batch:
+ continue
+
+ norm_mode = self.norm_map.get(ft.type, NormalizationMode.IDENTITY)
+ if norm_mode is NormalizationMode.IDENTITY:
+ continue
+
+ prefix = key.replace(".", "_")
+
+ if norm_mode is NormalizationMode.MEAN_STD:
+ mean = getattr(self, f"{prefix}_mean")
+ std = getattr(self, f"{prefix}_std")
+ assert not torch.isinf(mean).any(), _no_stats_error_str("mean")
+ assert not torch.isinf(std).any(), _no_stats_error_str("std")
+ batch[key] = (batch[key] - mean) / (std + 1e-8)
+ continue
+
+ if norm_mode is NormalizationMode.MIN_MAX:
+ min_val = getattr(self, f"{prefix}_min")
+ max_val = getattr(self, f"{prefix}_max")
+ assert not torch.isinf(min_val).any(), _no_stats_error_str("min")
+ assert not torch.isinf(max_val).any(), _no_stats_error_str("max")
+ batch[key] = (batch[key] - min_val) / (max_val - min_val + 1e-8)
+ batch[key] = batch[key] * 2 - 1
+ continue
+
+ raise ValueError(norm_mode)
+
+ return batch
+
+
+class UnnormalizeBuffer(nn.Module):
+ """Inverse operation of `NormalizeBuffer`. Uses registered buffers for statistics."""
+
+ def __init__(
+ self,
+ features: dict[str, PolicyFeature],
+ norm_map: dict[str, NormalizationMode],
+ stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ super().__init__()
+ self.features = features
+ self.norm_map = norm_map
+
+ _initialize_stats_buffers(self, features, norm_map, stats)
+
+ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+        batch = dict(batch)  # shallow copy avoids mutating the input batch
+ for key, ft in self.features.items():
+ if key not in batch:
+ continue
+
+ norm_mode = self.norm_map.get(ft.type, NormalizationMode.IDENTITY)
+ if norm_mode is NormalizationMode.IDENTITY:
+ continue
+
+ prefix = key.replace(".", "_")
+
+ if norm_mode is NormalizationMode.MEAN_STD:
+ mean = getattr(self, f"{prefix}_mean")
+ std = getattr(self, f"{prefix}_std")
+ assert not torch.isinf(mean).any(), _no_stats_error_str("mean")
+ assert not torch.isinf(std).any(), _no_stats_error_str("std")
+ batch[key] = batch[key] * std + mean
+ continue
+
+ if norm_mode is NormalizationMode.MIN_MAX:
+ min_val = getattr(self, f"{prefix}_min")
+ max_val = getattr(self, f"{prefix}_max")
+ assert not torch.isinf(min_val).any(), _no_stats_error_str("min")
+ assert not torch.isinf(max_val).any(), _no_stats_error_str("max")
+ batch[key] = (batch[key] + 1) / 2
+ batch[key] = batch[key] * (max_val - min_val) + min_val
+ continue
+
+ raise ValueError(norm_mode)
+
+ return batch
diff --git a/src/lerobot/policies/pi0/configuration_pi0.py b/src/lerobot/policies/pi0/configuration_pi0.py
new file mode 100644
index 0000000000..c9728e418b
--- /dev/null
+++ b/src/lerobot/policies/pi0/configuration_pi0.py
@@ -0,0 +1,149 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
+from lerobot.optim.optimizers import AdamWConfig
+from lerobot.optim.schedulers import (
+ CosineDecayWithWarmupSchedulerConfig,
+)
+
+
+@PreTrainedConfig.register_subclass("pi0")
+@dataclass
+class PI0Config(PreTrainedConfig):
+ # Input / output structure.
+ n_obs_steps: int = 1
+ chunk_size: int = 50
+ n_action_steps: int = 50
+
+ normalization_mapping: dict[str, NormalizationMode] = field(
+ default_factory=lambda: {
+ "VISUAL": NormalizationMode.IDENTITY,
+ "STATE": NormalizationMode.MEAN_STD,
+ "ACTION": NormalizationMode.MEAN_STD,
+ }
+ )
+
+ # Shorter state and action vectors will be padded
+ max_state_dim: int = 32
+ max_action_dim: int = 32
+
+ # Image preprocessing
+ resize_imgs_with_padding: tuple[int, int] = (224, 224)
+
+ # Add empty images. Used by pi0_aloha_sim which adds the empty
+ # left and right wrist cameras in addition to the top camera.
+ empty_cameras: int = 0
+
+ # Converts the joint and gripper values from the standard Aloha space to
+ # the space used by the pi internal runtime which was used to train the base model.
+ adapt_to_pi_aloha: bool = False
+
+ # Converts joint dimensions to deltas with respect to the current state before passing to the model.
+ # Gripper dimensions will remain in absolute values.
+ use_delta_joint_actions_aloha: bool = False
+
+ # Tokenizer
+ tokenizer_max_length: int = 48
+
+ # Projector
+ proj_width: int = 1024
+
+ # Decoding
+ num_steps: int = 10
+
+ # Attention utils
+ use_cache: bool = True
+ attention_implementation: str = "eager" # or fa2, flex
+
+ # Finetuning settings
+ freeze_vision_encoder: bool = True
+ train_expert_only: bool = False
+ train_state_proj: bool = True
+
+ # Training presets
+ optimizer_lr: float = 2.5e-5
+ optimizer_betas: tuple[float, float] = (0.9, 0.95)
+ optimizer_eps: float = 1e-8
+ optimizer_weight_decay: float = 1e-10
+
+ scheduler_warmup_steps: int = 1_000
+ scheduler_decay_steps: int = 30_000
+ scheduler_decay_lr: float = 2.5e-6
+
+ # TODO: Add EMA
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ # TODO(Steven): Validate device and amp? in all policy configs?
+        # Input validation (not exhaustive).
+ if self.n_action_steps > self.chunk_size:
+ raise ValueError(
+ f"The chunk size is the upper bound for the number of action steps per model invocation. Got "
+ f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`."
+ )
+ if self.n_obs_steps != 1:
+ raise ValueError(
+                f"Multiple observation steps not handled yet. Got `n_obs_steps={self.n_obs_steps}`"
+ )
+
+ if self.use_delta_joint_actions_aloha:
+ raise NotImplementedError(
+                "`use_delta_joint_actions_aloha` is used by pi0 for aloha real models. It is not yet ported to LeRobot."
+ )
+
+ def validate_features(self) -> None:
+ # TODO: implement value error
+ # if not self.image_features and not self.env_state_feature:
+ # raise ValueError("You must provide at least one image or the environment state among the inputs.")
+
+ for i in range(self.empty_cameras):
+ key = f"observation.images.empty_camera_{i}"
+ empty_camera = PolicyFeature(
+ type=FeatureType.VISUAL,
+ shape=(3, 480, 640),
+ )
+ self.input_features[key] = empty_camera
+
+ def get_optimizer_preset(self) -> AdamWConfig:
+ return AdamWConfig(
+ lr=self.optimizer_lr,
+ betas=self.optimizer_betas,
+ eps=self.optimizer_eps,
+ weight_decay=self.optimizer_weight_decay,
+ )
+
+ def get_scheduler_preset(self):
+ return CosineDecayWithWarmupSchedulerConfig(
+ peak_lr=self.optimizer_lr,
+ decay_lr=self.scheduler_decay_lr,
+ num_warmup_steps=self.scheduler_warmup_steps,
+ num_decay_steps=self.scheduler_decay_steps,
+ )
+
+ @property
+ def observation_delta_indices(self) -> None:
+ return None
+
+ @property
+ def action_delta_indices(self) -> list:
+ return list(range(self.chunk_size))
+
+ @property
+ def reward_delta_indices(self) -> None:
+ return None
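+
+
+# Illustrative check (hypothetical values): `chunk_size` is the upper bound for
+# `n_action_steps`, so the config below raises the ValueError from `__post_init__`,
+# while the defaults give `action_delta_indices == list(range(50))`:
+#
+#   PI0Config(chunk_size=50, n_action_steps=60)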
diff --git a/src/lerobot/policies/pi0/conversion_scripts/benchmark.py b/src/lerobot/policies/pi0/conversion_scripts/benchmark.py
new file mode 100644
index 0000000000..c1a4882449
--- /dev/null
+++ b/src/lerobot/policies/pi0/conversion_scripts/benchmark.py
@@ -0,0 +1,82 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.policies.factory import make_policy
+
+torch.backends.cudnn.benchmark = True
+
+
+def main():
+ device = "cuda"
+ dataset_repo_id = "danaaubakirova/koch_test"
+ # model_name = "pi0_base"
+ # ckpt_torch_dir = Path.home() / f".cache/openpi/openpi-assets/checkpoints/{model_name}_pytorch"
+ ckpt_torch_dir = "lerobot/pi0"
+
+ dataset = LeRobotDataset(dataset_repo_id, episodes=[0])
+
+ dataloader = torch.utils.data.DataLoader(
+ dataset,
+ num_workers=0,
+ batch_size=1,
+ )
+
+ batch = next(iter(dataloader))
+
+ # To device
+ for k in batch:
+ if isinstance(batch[k], torch.Tensor):
+ batch[k] = batch[k].to(device=device, dtype=torch.float32)
+
+ cfg = PreTrainedConfig.from_pretrained(ckpt_torch_dir)
+ cfg.pretrained_path = ckpt_torch_dir
+ policy = make_policy(cfg, ds_meta=dataset.meta)
+
+ # policy = torch.compile(policy, mode="reduce-overhead")
+
+ warmup_iters = 10
+ benchmark_iters = 30
+
+ # Warmup
+ for _ in range(warmup_iters):
+ torch.cuda.synchronize()
+ policy.select_action(batch)
+ policy.reset()
+ torch.cuda.synchronize()
+
+ # Benchmark
+ start_event = torch.cuda.Event(enable_timing=True)
+ end_event = torch.cuda.Event(enable_timing=True)
+
+ start_event.record()
+ for _ in range(benchmark_iters):
+ policy.select_action(batch)
+ policy.reset()
+ end_event.record()
+
+ # Synchronize and measure time
+ torch.cuda.synchronize()
+ elapsed_time_ms = start_event.elapsed_time(end_event)
+
+ avg_time_per_iter = elapsed_time_ms / benchmark_iters
+ print(f"Average execution time per iteration: {avg_time_per_iter:.3f} ms")
+
+
+if __name__ == "__main__":
+ with torch.inference_mode():
+ main()
diff --git a/src/lerobot/policies/pi0/conversion_scripts/compare_with_jax.py b/src/lerobot/policies/pi0/conversion_scripts/compare_with_jax.py
new file mode 100644
index 0000000000..c0c2e48169
--- /dev/null
+++ b/src/lerobot/policies/pi0/conversion_scripts/compare_with_jax.py
@@ -0,0 +1,131 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import pickle
+from pathlib import Path
+
+import torch
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
+from lerobot.policies.factory import make_policy
+
+
+def display(tensor: torch.Tensor):
+ if tensor.dtype == torch.bool:
+ tensor = tensor.float()
+ print(f"Shape: {tensor.shape}")
+ print(f"Mean: {tensor.mean().item()}")
+ print(f"Std: {tensor.std().item()}")
+ print(f"Min: {tensor.min().item()}")
+ print(f"Max: {tensor.max().item()}")
+
+
+def main():
+ num_motors = 14
+ device = "cuda"
+ # model_name = "pi0_aloha_towel"
+ model_name = "pi0_aloha_sim"
+
+ if model_name == "pi0_aloha_towel":
+ dataset_repo_id = "lerobot/aloha_static_towel"
+ else:
+ dataset_repo_id = "lerobot/aloha_sim_transfer_cube_human"
+
+ ckpt_torch_dir = Path.home() / f".cache/openpi/openpi-assets/checkpoints/{model_name}_pytorch"
+ ckpt_jax_dir = Path.home() / f".cache/openpi/openpi-assets/checkpoints/{model_name}"
+ save_dir = Path(f"../openpi/data/{model_name}/save")
+
+ with open(save_dir / "example.pkl", "rb") as f:
+ example = pickle.load(f)
+ with open(save_dir / "outputs.pkl", "rb") as f:
+ outputs = pickle.load(f)
+ with open(save_dir / "noise.pkl", "rb") as f:
+ noise = pickle.load(f)
+
+ with open(ckpt_jax_dir / "assets/norm_stats.json") as f:
+ norm_stats = json.load(f)
+
+ # Override stats
+ dataset_meta = LeRobotDatasetMetadata(dataset_repo_id)
+ dataset_meta.stats["observation.state"]["mean"] = torch.tensor(
+ norm_stats["norm_stats"]["state"]["mean"][:num_motors], dtype=torch.float32
+ )
+ dataset_meta.stats["observation.state"]["std"] = torch.tensor(
+ norm_stats["norm_stats"]["state"]["std"][:num_motors], dtype=torch.float32
+ )
+
+ # Create LeRobot batch from Jax
+ batch = {}
+ for cam_key, uint_chw_array in example["images"].items():
+ batch[f"observation.images.{cam_key}"] = torch.from_numpy(uint_chw_array) / 255.0
+ batch["observation.state"] = torch.from_numpy(example["state"])
+ batch["action"] = torch.from_numpy(outputs["actions"])
+ batch["task"] = example["prompt"]
+
+ if model_name == "pi0_aloha_towel":
+ del batch["observation.images.cam_low"]
+ elif model_name == "pi0_aloha_sim":
+ batch["observation.images.top"] = batch["observation.images.cam_high"]
+ del batch["observation.images.cam_high"]
+
+ # Batchify
+ for key in batch:
+ if isinstance(batch[key], torch.Tensor):
+ batch[key] = batch[key].unsqueeze(0)
+ elif isinstance(batch[key], str):
+ batch[key] = [batch[key]]
+ else:
+ raise ValueError(f"{key}, {batch[key]}")
+
+ # To device
+ for k in batch:
+ if isinstance(batch[k], torch.Tensor):
+ batch[k] = batch[k].to(device=device, dtype=torch.float32)
+
+ noise = torch.from_numpy(noise).to(device=device, dtype=torch.float32)
+
+ from lerobot import policies # noqa
+
+ cfg = PreTrainedConfig.from_pretrained(ckpt_torch_dir)
+ cfg.pretrained_path = ckpt_torch_dir
+ policy = make_policy(cfg, dataset_meta)
+
+ # loss_dict = policy.forward(batch, noise=noise, time=time_beta)
+ # loss_dict["loss"].backward()
+ # print("losses")
+ # display(loss_dict["losses_after_forward"])
+ # print("pi_losses")
+ # display(pi_losses)
+
+ actions = []
+ for _ in range(50):
+ action = policy.select_action(batch, noise=noise)
+ actions.append(action)
+
+ actions = torch.stack(actions, dim=1)
+ pi_actions = batch["action"]
+ print("actions")
+ display(actions)
+ print()
+ print("pi_actions")
+ display(pi_actions)
+ print("atol=3e-2", torch.allclose(actions, pi_actions, atol=3e-2))
+ print("atol=2e-2", torch.allclose(actions, pi_actions, atol=2e-2))
+ print("atol=1e-2", torch.allclose(actions, pi_actions, atol=1e-2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/lerobot/policies/pi0/conversion_scripts/conversion_utils.py b/src/lerobot/policies/pi0/conversion_scripts/conversion_utils.py
new file mode 100644
index 0000000000..8835da31ef
--- /dev/null
+++ b/src/lerobot/policies/pi0/conversion_scripts/conversion_utils.py
@@ -0,0 +1,84 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from transformers import GemmaConfig, PaliGemmaConfig
+
+
+def get_paligemma_config(precision: str):
+ config = {
+ "image_token_index": None,
+ "pad_token_id": 0,
+ "bos_token_id": 2,
+ "eos_token_id": 1,
+ }
+
+ # image_sizes = {"2b-test": 224, "3b-224px": 224, "3b-448px": 448, "3b-896px": 896}
+
+ image_size = 224 # image_sizes[variant]
+ patch_size = 14
+ num_image_tokens = (image_size**2) // (patch_size**2)
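+    # with image_size=224 and patch_size=14 this is (224**2) // (14**2) = 256 image tokens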
+
+ config["image_token_index"] = 257152
+ text_config = {
+ "vocab_size": 257152,
+ "num_hidden_layers": 18,
+ "num_key_value_heads": 1,
+ "head_dim": 256,
+ "torch_dtype": precision,
+ "hidden_size": 2048,
+ "hidden_activation": "gelu_pytorch_tanh",
+ "num_attention_heads": 8,
+ "intermediate_size": 16384,
+ "is_encoder_decoder": False,
+ }
+ vision_config = {
+ "torch_dtype": precision,
+ "image_size": image_size,
+ "patch_size": patch_size,
+ "num_image_tokens": num_image_tokens,
+ "hidden_size": 1152,
+ "intermediate_size": 4304,
+ "num_hidden_layers": 27,
+ "num_attention_heads": 16,
+ "projector_hidden_act": "gelu_fast",
+ "vision_use_head": False,
+ }
+ final_config = PaliGemmaConfig(text_config=text_config, vision_config=vision_config, **config)
+ return final_config
+
+
+def get_gemma_config(precision: str):
+ config = {
+ "image_token_index": None,
+ "pad_token_id": 0,
+ "bos_token_id": 2,
+ "eos_token_id": 1,
+ }
+
+ config["image_token_index"] = 257152
+ text_config = {
+ "vocab_size": 257152,
+ "num_hidden_layers": 18,
+ "num_key_value_heads": 1,
+ "head_dim": 256,
+ "torch_dtype": precision,
+ "hidden_size": 1024,
+ "hidden_activation": "gelu_pytorch_tanh",
+ "num_attention_heads": 8,
+ "intermediate_size": 4096,
+ "is_encoder_decoder": False,
+ }
+ final_config = GemmaConfig()
+ final_config.update(text_config)
+ return final_config
diff --git a/src/lerobot/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py b/src/lerobot/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py
new file mode 100644
index 0000000000..742c9ab3f7
--- /dev/null
+++ b/src/lerobot/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py
@@ -0,0 +1,437 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Convert pi0 parameters from Jax to Pytorch
+
+Follow [README of openpi](https://github.com/Physical-Intelligence/openpi) to create a new environment
+and install the required libraries.
+
+```bash
+cd ~/code/openpi
+source .venv/bin/activate
+```
+
+Example downloading parameters:
+```bash
+python
+>>> import openpi.shared.download as download
+>>> path='s3://openpi-assets/checkpoints/pi0_base/params'
+>>> download.maybe_download(path)
+```
+
+Converting pi0_base:
+```bash
+python -m lerobot.policies.pi0.conversion_scripts.convert_pi0_to_hf_lerobot \
+ --checkpoint_dir /home/remi_cadene/.cache/openpi/openpi-assets/checkpoints/pi0_base/params \
+ --output_path /home/remi_cadene/.cache/openpi/openpi-assets/checkpoints/pi0_base_pytorch
+```
+
+Converting pi0_aloha_sim:
+```bash
+python -m lerobot.policies.pi0.conversion_scripts.convert_pi0_to_hf_lerobot \
+ --checkpoint_dir /home/remi_cadene/.cache/openpi/openpi-assets/checkpoints/pi0_aloha_sim/params \
+ --output_path /home/remi_cadene/.cache/openpi/openpi-assets/checkpoints/pi0_aloha_sim_pytorch
+```
+"""
+
+import argparse
+import pathlib
+
+import jax
+import numpy as np
+import orbax.checkpoint as ocp
+import torch
+from jax.sharding import SingleDeviceSharding
+
+from lerobot.policies.pi0.configuration_pi0 import PI0Config
+from lerobot.policies.pi0.conversion_scripts.conversion_utils import (
+ get_gemma_config,
+ get_paligemma_config,
+)
+from lerobot.policies.pi0.modeling_pi0 import PI0Policy
+
+PRECISIONS = {"bfloat16": torch.bfloat16, "float32": torch.float32, "float16": torch.float16}
+
+
+def slice_paligemma_state_dict(state_dict, config):
+ suffix = "/value" if "img/embedding/kernel/value" in state_dict else ""
+
+ # fmt: off
+ # patch embeddings
+ state_dict["paligemma.vision_tower.vision_model.embeddings.patch_embedding.weight"] = state_dict.pop(f"img/embedding/kernel{suffix}").transpose(
+ 3, 2, 0, 1
+ )
+ state_dict["paligemma.vision_tower.vision_model.embeddings.patch_embedding.bias"] = state_dict.pop(f"img/embedding/bias{suffix}")
+ # positional embeddings
+ state_dict["paligemma.vision_tower.vision_model.embeddings.position_embedding.weight"] = state_dict.pop(f"img/pos_embedding{suffix}").reshape(
+ -1, config.vision_config.hidden_size
+ )
+
+ # extract vision layers to be sliced at index 0. There are 27 layers in the base model.
+ encoderblock_layernorm0_scale = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_0/scale{suffix}")
+ encoderblock_layernorm0_bias = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_0/bias{suffix}")
+ encoderblock_layernorm1_scale = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_1/scale{suffix}")
+ encoderblock_layernorm1_bias = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_1/bias{suffix}")
+
+ encoderblock_mlp_dense0_kernel= state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_0/kernel{suffix}")
+ encoderblock_mlp_dense0_bias= state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_0/bias{suffix}")
+ encoderblock_mlp_dense1_kernel= state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_1/kernel{suffix}")
+ encoderblock_mlp_dense1_bias= state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_1/bias{suffix}")
+
+ encoderblock_attention_0_key_kernel = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/kernel{suffix}")
+ encoderblock_attention_0_key_bias = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/bias{suffix}")
+ encoderblock_attention_0_value_kernel = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/kernel{suffix}")
+ encoderblock_attention_0_value_bias = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/bias{suffix}")
+ encoderblock_attention_0_query_kernel = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/kernel{suffix}")
+ encoderblock_attention_0_query_bias = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/bias{suffix}")
+ encoderblock_attention_0_out_kernel = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/kernel{suffix}")
+ encoderblock_attention_0_out_bias = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/bias{suffix}")
+
+ for i in range(config.vision_config.num_hidden_layers):
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.layer_norm1.weight"] = encoderblock_layernorm0_scale[i].transpose()
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.layer_norm1.bias"] = encoderblock_layernorm0_bias[i]
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.layer_norm2.weight"] = encoderblock_layernorm1_scale[i].transpose()
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.layer_norm2.bias"] = encoderblock_layernorm1_bias[i]
+
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.weight"] = encoderblock_mlp_dense0_kernel[i].transpose()
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.bias"] = encoderblock_mlp_dense0_bias[i]
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.weight"] = encoderblock_mlp_dense1_kernel[i].transpose()
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.bias"] = encoderblock_mlp_dense1_bias[i]
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = encoderblock_attention_0_key_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose()
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = encoderblock_attention_0_key_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1)
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = encoderblock_attention_0_value_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose()
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = encoderblock_attention_0_value_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1)
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = encoderblock_attention_0_query_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose()
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = encoderblock_attention_0_query_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1)
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"] = encoderblock_attention_0_out_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose()
+ state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.bias"] = encoderblock_attention_0_out_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1)
+
+ state_dict["paligemma.vision_tower.vision_model.post_layernorm.weight"] = state_dict.pop(f"img/Transformer/encoder_norm/scale{suffix}").transpose()
+ state_dict["paligemma.vision_tower.vision_model.post_layernorm.bias"] = state_dict.pop(f"img/Transformer/encoder_norm/bias{suffix}")
+
+ # multimodal projector
+
+ state_dict['paligemma.multi_modal_projector.linear.weight'] = state_dict.pop(f"img/head/kernel{suffix}").transpose()
+ state_dict['paligemma.multi_modal_projector.linear.bias'] = state_dict.pop(f"img/head/bias{suffix}")
+
+ # text decoder (gemma)
+ embedding_vector = state_dict.pop(f"llm/embedder/input_embedding{suffix}")
+ state_dict["paligemma.language_model.model.embed_tokens.weight"] = embedding_vector
+
+ # pop the einsum attention + mlp representations. There are 18 layers in gemma-2b.
+
+ llm_attention_attn_vec_einsum = state_dict.pop(f"llm/layers/attn/attn_vec_einsum/w{suffix}")
+ llm_attention_kv_einsum = state_dict.pop(f"llm/layers/attn/kv_einsum/w{suffix}")
+ llm_attention_q_einsum = state_dict.pop(f"llm/layers/attn/q_einsum/w{suffix}")
+
+ llm_mlp_gating_einsum = state_dict.pop(f"llm/layers/mlp/gating_einsum{suffix}")
+ llm_mlp_linear = state_dict.pop(f"llm/layers/mlp/linear{suffix}")
+ # TODO verify correctness of layer norm loading
+
+ llm_input_layernorm = state_dict.pop(f"llm/layers/pre_attention_norm/scale{suffix}")
+ llm_post_attention_layernorm = state_dict.pop(f"llm/layers/pre_ffw_norm/scale{suffix}")
+
+ for i in range(config.text_config.num_hidden_layers):
+ # llm_attention_q_einsum[i].shape = (8, 2048, 256)
+ q_proj_weight_reshaped = llm_attention_q_einsum[i].transpose(0, 2, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size)
+
+ state_dict[f"paligemma.language_model.model.layers.{i}.self_attn.q_proj.weight"] = q_proj_weight_reshaped
+
+ # llm_attention_kv_einsum[i, 0, 0].shape = (2048, 256)
+ k_proj_weight_reshaped = llm_attention_kv_einsum[i, 0, 0].transpose()
+ state_dict[f"paligemma.language_model.model.layers.{i}.self_attn.k_proj.weight"] = k_proj_weight_reshaped
+ # llm_attention_kv_einsum[i, 1, 0].shape = (2048, 256)
+ v_proj_weight_reshaped = llm_attention_kv_einsum[i, 1, 0].transpose()
+ state_dict[f"paligemma.language_model.model.layers.{i}.self_attn.v_proj.weight"] = v_proj_weight_reshaped
+
+ # output projection.
+
+ # llm_attention_attn_vec_einsum[i].shape = (8, 256, 2048)
+ o_proj_weight_reshaped = llm_attention_attn_vec_einsum[i].transpose(2, 0, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size)
+
+ state_dict[f"paligemma.language_model.model.layers.{i}.self_attn.o_proj.weight"] = o_proj_weight_reshaped
+ # mlp layers
+ gate_proj_weight = llm_mlp_gating_einsum[i, 0]
+ state_dict[f"paligemma.language_model.model.layers.{i}.mlp.gate_proj.weight"] = gate_proj_weight.transpose()
+ up_proj_weight = llm_mlp_gating_einsum[i, 1]
+ state_dict[f"paligemma.language_model.model.layers.{i}.mlp.up_proj.weight"] = up_proj_weight.transpose()
+ state_dict[f"paligemma.language_model.model.layers.{i}.mlp.down_proj.weight"] = llm_mlp_linear[i].transpose()
+ state_dict[f"paligemma.language_model.model.layers.{i}.input_layernorm.weight"] = llm_input_layernorm[i]
+ state_dict[f"paligemma.language_model.model.layers.{i}.post_attention_layernorm.weight"] = llm_post_attention_layernorm[i]
+
+ state_dict["paligemma.language_model.model.norm.weight"] = state_dict.pop(f"llm/final_norm/scale{suffix}")
+ state_dict["paligemma.language_model.lm_head.weight"] = embedding_vector # weights are tied.
+
+ # fmt: on
+ expert_dict = {}
+ final_state_dict = {}
+ for key, value in state_dict.items():
+ if key not in [
+ f"llm/final_norm_1/scale{suffix}",
+ f"llm/layers/attn/attn_vec_einsum_1/w{suffix}",
+ f"llm/layers/attn/kv_einsum_1/w{suffix}",
+ f"llm/layers/attn/q_einsum_1/w{suffix}",
+ f"llm/layers/mlp_1/gating_einsum{suffix}",
+ f"llm/layers/mlp_1/linear{suffix}",
+ f"llm/layers/pre_attention_norm_1/scale{suffix}",
+ f"llm/layers/pre_ffw_norm_1/scale{suffix}",
+ ]:
+ final_state_dict[key] = torch.from_numpy(value)
+ else:
+ expert_dict[key] = value
+
+ return final_state_dict, expert_dict
+
+
+def slice_gemma_state_dict(state_dict, config, num_expert=1):
+ # fmt: off
+ # text decoder (gemma)
+ # no embedding vector, the expert just has the decoder layers
+
+ embedding_vector = torch.zeros([config.vocab_size, config.hidden_size])
+ state_dict["gemma_expert.model.embed_tokens.weight"] = embedding_vector
+
+ # pop the einsum attention + mlp representations. There are 18 layers in gemma-2b.
+
+ suffix = "/value" if f"llm/layers/attn/attn_vec_einsum_{num_expert}/w/value" in state_dict else ""
+
+ llm_attention_attn_vec_einsum = state_dict.pop(f"llm/layers/attn/attn_vec_einsum_{num_expert}/w{suffix}")
+ llm_attention_kv_einsum = state_dict.pop(f"llm/layers/attn/kv_einsum_{num_expert}/w{suffix}")
+ llm_attention_q_einsum = state_dict.pop(f"llm/layers/attn/q_einsum_{num_expert}/w{suffix}")
+
+ llm_mlp_gating_einsum = state_dict.pop(f"llm/layers/mlp_{num_expert}/gating_einsum{suffix}")
+ llm_mlp_linear = state_dict.pop(f"llm/layers/mlp_{num_expert}/linear{suffix}")
+ # TODO verify correctness of layer norm loading
+
+ llm_input_layernorm = state_dict.pop(f"llm/layers/pre_attention_norm_{num_expert}/scale{suffix}")
+ llm_post_attention_layernorm = state_dict.pop(f"llm/layers/pre_ffw_norm_{num_expert}/scale{suffix}")
+
+ for i in range(config.num_hidden_layers):
+ q_proj_weight_reshaped = llm_attention_q_einsum[i].transpose(0, 2, 1).reshape(config.num_attention_heads * config.head_dim, config.hidden_size)
+
+ state_dict[f"gemma_expert.model.layers.{i}.self_attn.q_proj.weight"] = q_proj_weight_reshaped
+
+ k_proj_weight_reshaped = llm_attention_kv_einsum[i, 0, 0].transpose()
+ state_dict[f"gemma_expert.model.layers.{i}.self_attn.k_proj.weight"] = k_proj_weight_reshaped
+ v_proj_weight_reshaped = llm_attention_kv_einsum[i, 1, 0].transpose()
+ state_dict[f"gemma_expert.model.layers.{i}.self_attn.v_proj.weight"] = v_proj_weight_reshaped
+
+ # output projection.
+
+ # llm_attention_attn_vec_einsum[i].shape = (8, 256, 1024)
+ o_proj_weight_reshaped = llm_attention_attn_vec_einsum[i].reshape(config.num_attention_heads * config.head_dim, config.hidden_size).transpose(1,0)# .transpose(2, 0, 1).reshape(config.num_attention_heads * config.head_dim, config.hidden_size).transpose(1, 0)
+
+ state_dict[f"gemma_expert.model.layers.{i}.self_attn.o_proj.weight"] = o_proj_weight_reshaped
+ # mlp layers
+ gate_proj_weight = llm_mlp_gating_einsum[i, 0]
+ state_dict[f"gemma_expert.model.layers.{i}.mlp.gate_proj.weight"] = gate_proj_weight.transpose()
+ up_proj_weight = llm_mlp_gating_einsum[i, 1]
+ state_dict[f"gemma_expert.model.layers.{i}.mlp.up_proj.weight"] = up_proj_weight.transpose()
+ state_dict[f"gemma_expert.model.layers.{i}.mlp.down_proj.weight"] = llm_mlp_linear[i].transpose()
+ state_dict[f"gemma_expert.model.layers.{i}.input_layernorm.weight"] = llm_input_layernorm[i]
+ state_dict[f"gemma_expert.model.layers.{i}.post_attention_layernorm.weight"] = llm_post_attention_layernorm[i]
+
+ state_dict["gemma_expert.model.norm.weight"] = state_dict.pop(f"llm/final_norm_{num_expert}/scale{suffix}")
+ state_dict["gemma_expert.lm_head.weight"] = embedding_vector # weights are tied. (and zeros here)
+
+ # fmt: on
+ final_state_dict = {}
+ for key, value in state_dict.items():
+ if not isinstance(value, torch.Tensor):
+ final_state_dict[key] = torch.from_numpy(value)
+ else:
+ final_state_dict[key] = value
+ return final_state_dict
+
+
+def flatten_for_memory(tree, parent_key=""):
+ out = {}
+ for k, v in tree.items():
+ new_key = f"{parent_key}/{k}" if parent_key else k
+ if isinstance(v, dict):
+ out.update(flatten_for_memory(v, new_key))
+ else:
+ out[new_key] = np.array(v) # Ensure conversion to np.array for consistency
+ return out
+
+
+def flatten_for_npz(tree, parent_key=""):
+ out = {}
+ for k, v in tree.items():
+ new_key = f"{parent_key}/{k}" if parent_key else k
+ if isinstance(v, dict):
+ out.update(flatten_for_npz(v, new_key))
+ else:
+ # bf16/f32 here?
+ out[new_key] = np.array(v)
+ return out
+
+
+def slice_initial_orbax_checkpoint(checkpoint_dir: str):
+ params_path = pathlib.Path(checkpoint_dir).resolve()
+ checkpointer = ocp.PyTreeCheckpointer()
+
+ metadata = checkpointer.metadata(params_path)
+ print("Metadata keys:", list(metadata.keys()))
+
+ params_name = "params"
+
+ item = {params_name: metadata[params_name]}
+ device = jax.local_devices()[0] # Use the first local device
+ sharding = SingleDeviceSharding(device)
+ restored = checkpointer.restore(
+ params_path,
+ ocp.args.PyTreeRestore(
+ item=item,
+ restore_args=jax.tree_util.tree_map(
+ lambda _: ocp.ArrayRestoreArgs(
+ restore_type=jax.Array, # or np.ndarray, but bf16 is annoying about it
+ sharding=sharding,
+ ),
+ item,
+ ),
+ transforms={},
+ ),
+ )
+ params = restored[params_name]
+
+ # get params for PaliGemma
+ pali_params = params["PaliGemma"]
+ del params["PaliGemma"]
+ pali_params_flat = flatten_for_npz(pali_params)
+ return {"paligemma_params": pali_params_flat, "projection_params": params}
+
+
+def update_keys_with_prefix(d: dict, prefix: str) -> dict:
+ """Update dictionary keys by adding a prefix."""
+ return {f"{prefix}{key}": value for key, value in d.items()}
+
+
+def convert_pi0_checkpoint(checkpoint_dir: str, precision: str, tokenizer_id: str, output_path: str):
+ # Break down orbax ckpts - they are in OCDBT
+ initial_params = slice_initial_orbax_checkpoint(checkpoint_dir=checkpoint_dir)
+ # process projection params
+ keys = [
+ "state_proj",
+ "action_in_proj",
+ "action_out_proj",
+ "action_time_mlp_in",
+ "action_time_mlp_out",
+ ]
+
+ projection_params = {}
+ for key in keys:
+ kernel_params = initial_params["projection_params"][key]["kernel"]
+ bias_params = initial_params["projection_params"][key]["bias"]
+ if isinstance(kernel_params, dict):
+ weight = kernel_params["value"]
+ bias = bias_params["value"]
+ else:
+ weight = kernel_params
+ bias = bias_params
+ projection_params[f"{key}.weight"] = torch.from_numpy(np.array(weight)).T
+ projection_params[f"{key}.bias"] = torch.from_numpy(np.array(bias))
+
+ # Process PaliGemma weights
+ paligemma_config = get_paligemma_config(precision)
+ paligemma_params, gemma_raw_dictionary = slice_paligemma_state_dict(
+ initial_params["paligemma_params"], paligemma_config
+ )
+
+ # Process Gemma weights (at this stage they are unused)
+ gemma_config = get_gemma_config(precision)
+ gemma_params = slice_gemma_state_dict(gemma_raw_dictionary, config=gemma_config)
+
+ # Instantiate model from configs
+
+ if "pi0_aloha_sim" in checkpoint_dir:
+ pi0_config = PI0Config(
+ empty_cameras=2,
+ adapt_to_pi_aloha=True,
+ use_delta_joint_actions_aloha=False,
+ )
+ elif "pi0_aloha_towel" in checkpoint_dir:
+ pi0_config = PI0Config(
+ adapt_to_pi_aloha=True,
+ use_delta_joint_actions_aloha=True,
+ )
+ elif "pi0_base" in checkpoint_dir:
+ pi0_config = PI0Config(
+ empty_cameras=0,
+ adapt_to_pi_aloha=False,
+ use_delta_joint_actions_aloha=False,
+ )
+ else:
+        raise ValueError(f"Could not infer a PI0Config from checkpoint_dir: {checkpoint_dir}")
+
+ # gemma_config=gemma_config, paligemma_config=paligemma_config)
+ pi0_model = PI0Policy(pi0_config)
+
+ paligemma_params = update_keys_with_prefix(paligemma_params, "model.paligemma_with_expert.")
+ gemma_params = update_keys_with_prefix(gemma_params, "model.paligemma_with_expert.")
+ projection_params = update_keys_with_prefix(projection_params, "model.")
+
+ # load state dict
+ torch_dtype = PRECISIONS[precision]
+ pi0_model.load_state_dict({**paligemma_params, **gemma_params, **projection_params})
+ pi0_model = pi0_model.to(torch_dtype)
+ # pi0_tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
+
+ pi0_model.save_pretrained(output_path, safe_serialization=True)
+ # pi0_tokenizer.save_pretrained(output_path, dtype=torch_dtype)
+
+ # assert that model loads properly
+ del pi0_model
+ PI0Policy.from_pretrained(output_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--checkpoint_dir",
+ default="/raid/pablo/.cache/openpi/openpi-assets/checkpoints/pi0_aloha_sim/params",
+ type=str,
+ help="Path to the ocdbt checkpoint",
+ )
+
+ parser.add_argument(
+ "--precision",
+ choices=["float32", "bfloat16", "float16"],
+ default="float32",
+ type=str,
+ help="Precision identifier for model conversion - should match the base checkpoint precision.",
+ )
+ # tokenizer is identical to paligemma, it appears
+
+ parser.add_argument(
+ "--tokenizer_hub_id",
+ default="google/paligemma-3b-pt-224",
+ type=str,
+ help="Hub path to the tokenizer to save",
+ )
+
+ parser.add_argument(
+ "--output_path",
+ required=True,
+ type=str,
+ help="Path to save converted weights to",
+ )
+
+ args = parser.parse_args()
+ convert_pi0_checkpoint(
+ checkpoint_dir=args.checkpoint_dir,
+ precision=args.precision,
+ tokenizer_id=args.tokenizer_hub_id,
+ output_path=args.output_path,
+ )
diff --git a/src/lerobot/policies/pi0/flex_attention.py b/src/lerobot/policies/pi0/flex_attention.py
new file mode 100644
index 0000000000..35628cddb4
--- /dev/null
+++ b/src/lerobot/policies/pi0/flex_attention.py
@@ -0,0 +1,141 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn.functional as F # noqa: N812
+from packaging.version import Version
+
+if Version(torch.__version__) > Version("2.5.0"):
+    # Flex attention is only available from torch 2.5 onwards
+ from torch.nn.attention.flex_attention import (
+ _mask_mod_signature,
+ _round_up_to_multiple,
+ create_block_mask,
+ create_mask,
+ flex_attention,
+ )
+
+
+# @torch.compile(dynamic=False)
+def flex_attention_forward(
+ attention_mask: torch.Tensor,
+ batch_size: int,
+ head_dim: int,
+ query_states: torch.Tensor,
+ key_states: torch.Tensor,
+ value_states: torch.Tensor,
+ scaling=None,
+):
+ """
+ This is defined out of classes to make compile happy.
+ """
+
+ original_dtype = query_states.dtype
+ num_att_heads = 8
+ num_key_value_heads = 1
+ num_key_value_groups = num_att_heads // num_key_value_heads
+
+ key_states = key_states[:, :, :, None, :]
+ key_states = key_states.expand(
+ batch_size, key_states.shape[1], num_key_value_heads, num_key_value_groups, head_dim
+ )
+ key_states = key_states.reshape(
+ batch_size, key_states.shape[1], num_key_value_heads * num_key_value_groups, head_dim
+ )
+
+ value_states = value_states[:, :, :, None, :]
+ value_states = value_states.expand(
+ batch_size, value_states.shape[1], num_key_value_heads, num_key_value_groups, head_dim
+ )
+ value_states = value_states.reshape(
+ batch_size, value_states.shape[1], num_key_value_heads * num_key_value_groups, head_dim
+ )
+
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ query_states = query_states.to(torch.float32)
+ key_states = key_states.to(torch.float32)
+ value_states = value_states.to(torch.float32)
+
+ causal_mask = attention_mask
+ if causal_mask is not None:
+ causal_mask = causal_mask[:, None, :, : key_states.shape[2]]
+
+ if causal_mask.shape[1] == 1 and query_states.shape[1] > 1:
+ causal_mask = causal_mask.expand(-1, query_states.shape[1], -1, -1)
+
+ def precomputed_mask_factory(precomputed_mask: torch.Tensor) -> _mask_mod_signature:
+ def mask_mod(b, h, q_idx, kv_idx):
+ # Danger zone: if b,h,q_idx,kv_idx exceed the shape, device-side assert occurs.
+ return precomputed_mask[b][h][q_idx][kv_idx]
+
+ return mask_mod
+
+ b_mask, h_mask, q_len, kv_len = causal_mask.shape # The shape of your mask
+
+ block_size = 128
+ q_len_rounded = _round_up_to_multiple(q_len, block_size)
+ kv_len_rounded = _round_up_to_multiple(kv_len, block_size)
+
+    # *CRITICAL* we do need to pad the mask up to the rounded lengths here, else we get a CUDA index error
+
+ pad_q = q_len_rounded - q_len
+ pad_k = kv_len_rounded - kv_len
+
+ padded_causal_mask = F.pad(causal_mask, (0, pad_k, 0, pad_q), value=0.0)
+ mask_mod_fn_orig = precomputed_mask_factory(padded_causal_mask)
+
+ mask_4d = create_mask(
+ mod_fn=mask_mod_fn_orig,
+ B=b_mask,
+ H=h_mask,
+ Q_LEN=q_len_rounded,
+ KV_LEN=kv_len_rounded,
+ device=causal_mask.device,
+ _compile=False,
+ )
+
+ mask_mod_fn_padded = precomputed_mask_factory(mask_4d)
+ block_mask = create_block_mask(
+ mask_mod=mask_mod_fn_padded,
+ B=b_mask,
+ H=h_mask,
+ Q_LEN=q_len_rounded,
+ KV_LEN=kv_len_rounded,
+ BLOCK_SIZE=block_size,
+ device=causal_mask.device,
+ _compile=False,
+ )
+
+ # mask is applied inside the kernel, ideally more efficiently than score_mod.
+ attn_output, attention_weights = flex_attention(
+ query_states,
+ key_states,
+ value_states,
+ block_mask=block_mask,
+ enable_gqa=True, # because we shaped query/key states for GQA
+ scale=head_dim**-0.5 if scaling is None else scaling,
+ return_lse=True,
+ )
+
+ attn_output = attn_output.to(dtype=original_dtype)
+ attn_output = attn_output.transpose(1, 2).contiguous() # [B, Q_LEN, H, head_dim]
+ attn_output = attn_output.reshape(
+ batch_size,
+ -1,
+ attn_output.shape[2] * attn_output.shape[3], # merges [H, head_dim]
+ )
+ return attn_output
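+
+
+# Shape sketch (hypothetical sizes): with batch_size=2, a sequence length of 48 and
+# head_dim=256, `query_states` enters as (2, 48, 8, 256) while `key_states` and
+# `value_states` enter as (2, 48, 1, 256); the GQA expansion above broadcasts the single
+# KV head to the 8 attention heads, and the returned `attn_output` has shape (2, 48, 8 * 256).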
diff --git a/src/lerobot/policies/pi0/modeling_pi0.py b/src/lerobot/policies/pi0/modeling_pi0.py
new file mode 100644
index 0000000000..badfb4b8c1
--- /dev/null
+++ b/src/lerobot/policies/pi0/modeling_pi0.py
@@ -0,0 +1,737 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+π0: A Vision-Language-Action Flow Model for General Robot Control
+
+[Paper](https://www.physicalintelligence.company/download/pi0.pdf)
+[Jax code](https://github.com/Physical-Intelligence/openpi)
+
+Designed by Physical Intelligence. Ported from Jax by Hugging Face.
+
+Install pi0 extra dependencies:
+```bash
+pip install -e ".[pi0]"
+```
+
+Example of finetuning the pi0 pretrained model (`pi0_base` in `openpi`):
+```bash
+python -m lerobot.scripts.train \
+--policy.path=lerobot/pi0 \
+--dataset.repo_id=danaaubakirova/koch_test
+```
+
+Example of finetuning the pi0 neural network with PaliGemma and the Gemma expert
+initialized from their default VLM pretrained parameters (i.e. not from the pi0 checkpoint):
+```bash
+python -m lerobot.scripts.train \
+--policy.type=pi0 \
+--dataset.repo_id=danaaubakirova/koch_test
+```
+
+Example of using the pi0 pretrained model outside LeRobot training framework:
+```python
+policy = PI0Policy.from_pretrained("lerobot/pi0")
+```
+
+"""
+
+import math
+from collections import deque
+
+import torch
+import torch.nn.functional as F # noqa: N812
+from torch import Tensor, nn
+from transformers import AutoTokenizer
+
+from lerobot.constants import ACTION, OBS_STATE
+from lerobot.policies.normalize import Normalize, Unnormalize
+from lerobot.policies.pi0.configuration_pi0 import PI0Config
+from lerobot.policies.pi0.paligemma_with_expert import (
+ PaliGemmaWithExpertConfig,
+ PaliGemmaWithExpertModel,
+)
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.utils.utils import get_safe_dtype
+
+
+def create_sinusoidal_pos_embedding(
+    time: Tensor, dimension: int, min_period: float, max_period: float, device="cpu"
+) -> Tensor:
+ """Computes sine-cosine positional embedding vectors for scalar positions."""
+ if dimension % 2 != 0:
+ raise ValueError(f"dimension ({dimension}) must be divisible by 2")
+
+ if time.ndim != 1:
+ raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")
+
+ dtype = get_safe_dtype(torch.float64, device.type)
+ fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
+ period = min_period * (max_period / min_period) ** fraction
+
+ # Compute the outer product
+ scaling_factor = 1.0 / period * 2 * math.pi
+ sin_input = scaling_factor[None, :] * time[:, None]
+ pos_emb = torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
+ return pos_emb
+
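+# Usage sketch (hypothetical arguments): embedding a batch of two scalar timesteps into a
+# 64-dimensional vector. Note the function expects `device` to be a torch.device, since it
+# reads `device.type`:
+#
+#   t = torch.rand(2)
+#   emb = create_sinusoidal_pos_embedding(t, dimension=64, min_period=4e-3, max_period=4.0,
+#                                         device=torch.device("cpu"))
+#   assert emb.shape == (2, 64)
+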
+
+def sample_beta(alpha, beta, bsize, device):
+ gamma1 = torch.empty((bsize,), device=device).uniform_(0, 1).pow(1 / alpha)
+ gamma2 = torch.empty((bsize,), device=device).uniform_(0, 1).pow(1 / beta)
+ return gamma1 / (gamma1 + gamma2)
+
+
+def make_att_2d_masks(pad_masks, att_masks):
+ """Copied from big_vision.
+
+    Tokens can attend to valid input tokens which have a cumulative att_masks value
+    smaller than or equal to theirs. This way `att_masks` int[B, N] can be used to
+ setup several types of attention, for example:
+
+ [[1 1 1 1 1 1]]: pure causal attention.
+
+ [[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
+ themselves and the last 3 tokens have a causal attention. The first
+ entry could also be a 1 without changing behaviour.
+
+ [[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
+ block can attend all previous blocks and all tokens on the same block.
+
+ Args:
+        pad_masks: bool[B, N] true if it's part of the input, false if padding.
+        att_masks: int32[B, N] mask that's 1 where previous tokens cannot depend on
+ it and 0 where it shares the same attention mask as the previous token.
+ """
+ if att_masks.ndim != 2:
+ raise ValueError(att_masks.ndim)
+ if pad_masks.ndim != 2:
+ raise ValueError(pad_masks.ndim)
+
+ cumsum = torch.cumsum(att_masks, dim=1)
+ att_2d_masks = cumsum[:, None, :] <= cumsum[:, :, None]
+ pad_2d_masks = pad_masks[:, None, :] * pad_masks[:, :, None]
+ att_2d_masks = att_2d_masks & pad_2d_masks
+ return att_2d_masks
+
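+# Worked example (hypothetical masks): with pad_masks = [[1, 1, 1, 0]] and
+# att_masks = [[0, 0, 1, 0]], the cumulative sum is [0, 0, 1, 1], so the two prefix
+# tokens attend only to each other, token 2 attends to tokens 0-2, and the padded
+# token 3 is masked out both as a query and as a key.
+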
+
+def resize_with_pad(img, width, height, pad_value=-1):
+    # assumed to be a no-op when the image already matches the target (width, height)
+ if img.ndim != 4:
+ raise ValueError(f"(b,c,h,w) expected, but {img.shape}")
+
+ cur_height, cur_width = img.shape[2:]
+
+ ratio = max(cur_width / width, cur_height / height)
+ resized_height = int(cur_height / ratio)
+ resized_width = int(cur_width / ratio)
+ resized_img = F.interpolate(
+ img, size=(resized_height, resized_width), mode="bilinear", align_corners=False
+ )
+
+ pad_height = max(0, int(height - resized_height))
+ pad_width = max(0, int(width - resized_width))
+
+ # pad on left and top of image
+ padded_img = F.pad(resized_img, (pad_width, 0, pad_height, 0), value=pad_value)
+ return padded_img
+
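+# Example (hypothetical sizes): a (1, 3, 480, 640) image passed with width=224, height=224
+# is first downscaled with its aspect ratio preserved so it fits inside 224 x 224, then
+# padded with `pad_value` on the top and left, giving a (1, 3, 224, 224) output.
+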
+
+def pad_vector(vector, new_dim):
+    """Pad the last dimension of `vector` with zeros up to `new_dim`.
+
+    `vector` can be (batch_size x sequence_length x features_dimension)
+    or (batch_size x features_dimension).
+    """
+ if vector.shape[-1] == new_dim:
+ return vector
+ shape = list(vector.shape)
+ current_dim = shape[-1]
+ shape[-1] = new_dim
+ new_vector = torch.zeros(*shape, dtype=vector.dtype, device=vector.device)
+ new_vector[..., :current_dim] = vector
+ return new_vector
+
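+# Example (hypothetical dimensions): pad_vector(torch.randn(2, 7), 32) returns a (2, 32)
+# tensor whose first 7 features are the original values and whose remaining 25 are zeros.
+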
+
+def normalize(x, min_val, max_val):
+ return (x - min_val) / (max_val - min_val)
+
+
+def unnormalize(x, min_val, max_val):
+ return x * (max_val - min_val) + min_val
+
+
+def safe_arcsin(value):
+ # This ensures that the input stays within
+ # [−1,1] to avoid invalid values for arcsin
+ return torch.arcsin(torch.clamp(value, -1.0, 1.0))
+
+
+def aloha_gripper_to_angular(value):
+ # Aloha transforms the gripper positions into a linear space. The following code
+ # reverses this transformation to be consistent with pi0 which is pretrained in
+ # angular space.
+ #
+ # These values are coming from the Aloha code:
+ # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED
+ value = unnormalize(value, min_val=0.01844, max_val=0.05800)
+
+ # This is the inverse of the angular to linear transformation inside the Interbotix code.
+ def linear_to_radian(linear_position, arm_length, horn_radius):
+ value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position)
+ return safe_arcsin(value)
+
+ # The constants are taken from the Interbotix code.
+ value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022)
+
+ # Normalize to [0, 1].
+ # The values 0.4 and 1.5 were measured on an actual Trossen robot.
+ return normalize(value, min_val=0.4, max_val=1.5)
+
+
+def aloha_gripper_from_angular(value):
+ # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha.
+ # Note that the units are still angular but the range is different.
+
+ # The values 0.4 and 1.5 were measured on an actual Trossen robot.
+ value = unnormalize(value, min_val=0.4, max_val=1.5)
+
+ # These values are coming from the Aloha code:
+ # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE
+ return normalize(value, min_val=-0.6213, max_val=1.4910)
+
+
+def aloha_gripper_from_angular_inv(value):
+ # Directly inverts the gripper_from_angular function.
+ value = unnormalize(value, min_val=-0.6213, max_val=1.4910)
+ return normalize(value, min_val=0.4, max_val=1.5)
+
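+# Sanity note: `aloha_gripper_from_angular_inv` undoes `aloha_gripper_from_angular`
+# exactly, i.e. aloha_gripper_from_angular_inv(aloha_gripper_from_angular(x)) == x
+# up to floating point error.
+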
+
+class PI0Policy(PreTrainedPolicy):
+ """Wrapper class around PI0FlowMatching model to train and run inference within LeRobot."""
+
+ config_class = PI0Config
+ name = "pi0"
+
+ def __init__(
+ self,
+ config: PI0Config,
+ dataset_stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ """
+ Args:
+ config: Policy configuration class instance or None, in which case the default instantiation of
+ the configuration class is used.
+ dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
+ that they will be passed with a call to `load_state_dict` before the policy is used.
+ """
+
+ super().__init__(config)
+ config.validate_features()
+ self.config = config
+ self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
+ self.normalize_targets = Normalize(
+ config.output_features, config.normalization_mapping, dataset_stats
+ )
+ self.unnormalize_outputs = Unnormalize(
+ config.output_features, config.normalization_mapping, dataset_stats
+ )
+
+ self.language_tokenizer = AutoTokenizer.from_pretrained("google/paligemma-3b-pt-224")
+ self.model = PI0FlowMatching(config)
+
+ self.reset()
+
+ def reset(self):
+ """This should be called whenever the environment is reset."""
+ self._action_queue = deque([], maxlen=self.config.n_action_steps)
+
+ def get_optim_params(self) -> dict:
+ return self.parameters()
+
+ @torch.no_grad()
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ """Predict a chunk of actions given environment observations."""
+ raise NotImplementedError("Currently not implemented for PI0")
+
+ @torch.no_grad()
+ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+ """Select a single action given environment observations.
+
+ This method wraps `select_actions` in order to return one action at a time for execution in the
+ environment. It works by managing the actions in a queue and only calling `select_actions` when the
+ queue is empty.
+ """
+ self.eval()
+
+ if self.config.adapt_to_pi_aloha:
+ batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
+
+ batch = self.normalize_inputs(batch)
+
+ # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
+ # querying the policy.
+ if len(self._action_queue) == 0:
+ images, img_masks = self.prepare_images(batch)
+ state = self.prepare_state(batch)
+ lang_tokens, lang_masks = self.prepare_language(batch)
+
+ actions = self.model.sample_actions(
+ images, img_masks, lang_tokens, lang_masks, state, noise=noise
+ )
+
+ # Unpad actions
+ original_action_dim = self.config.action_feature.shape[0]
+ actions = actions[:, :, :original_action_dim]
+
+ actions = self.unnormalize_outputs({"action": actions})["action"]
+
+ if self.config.adapt_to_pi_aloha:
+ actions = self._pi_aloha_encode_actions(actions)
+
+ # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
+ # effectively has shape (n_action_steps, batch_size, *), hence the transpose.
+ self._action_queue.extend(actions.transpose(0, 1))
+ return self._action_queue.popleft()
+
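+ # Minimal usage sketch (illustrative, not part of this file). The checkpoint id and the
+ # camera key are assumptions made for the example; key names follow the conventions above.
+ #
+ #   policy = PI0Policy.from_pretrained("lerobot/pi0")
+ #   policy.reset()
+ #   batch = {
+ #       OBS_STATE: state,                         # (1, state_dim) tensor
+ #       "observation.images.top": image,          # (1, 3, H, W) float in [0, 1]
+ #       "task": ["pick up the cube"],
+ #   }
+ #   action = policy.select_action(batch)          # one action; the rest stay queued
+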
+ def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> tuple[Tensor, dict[str, Tensor]]:
+ """Do a full training forward pass to compute the loss"""
+ if self.config.adapt_to_pi_aloha:
+ batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
+ batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])
+
+ batch = self.normalize_inputs(batch)
+ batch = self.normalize_targets(batch)
+
+ images, img_masks = self.prepare_images(batch)
+ state = self.prepare_state(batch)
+ lang_tokens, lang_masks = self.prepare_language(batch)
+ actions = self.prepare_action(batch)
+ actions_is_pad = batch.get("action_is_pad")
+
+ loss_dict = {}
+ losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
+ loss_dict["losses_after_forward"] = losses.clone()
+
+ if actions_is_pad is not None:
+ in_episode_bound = ~actions_is_pad
+ losses = losses * in_episode_bound.unsqueeze(-1)
+ loss_dict["losses_after_in_ep_bound"] = losses.clone()
+
+ # Remove padding
+ losses = losses[:, :, : self.config.max_action_dim]
+ loss_dict["losses_after_rm_padding"] = losses.clone()
+
+ # For backward pass
+ loss = losses.mean()
+ # For logging
+ loss_dict["l2_loss"] = loss.item()
+
+ return loss, loss_dict
+
+ def prepare_images(self, batch):
+ """Apply Pi0 preprocessing to the images, like resizing to 224x224 and padding to keep aspect ratio, and
+ convert pixel range from [0.0, 1.0] to [-1.0, 1.0] as requested by SigLIP.
+ """
+ images = []
+ img_masks = []
+
+ present_img_keys = [key for key in self.config.image_features if key in batch]
+ missing_img_keys = [key for key in self.config.image_features if key not in batch]
+
+ if len(present_img_keys) == 0:
+ raise ValueError(
+ f"All image features are missing from the batch. At least one expected. (batch: {batch.keys()}) (image_features:{self.config.image_features})"
+ )
+
+ # Preprocess image features present in the batch
+ for key in present_img_keys:
+ img = batch[key]
+
+ if self.config.resize_imgs_with_padding is not None:
+ img = resize_with_pad(img, *self.config.resize_imgs_with_padding, pad_value=0)
+
+ # Normalize from range [0,1] to [-1,1] as expected by siglip
+ img = img * 2.0 - 1.0
+
+ bsize = img.shape[0]
+ device = img.device
+ mask = torch.ones(bsize, dtype=torch.bool, device=device)
+ images.append(img)
+ img_masks.append(mask)
+
+ # Create image features not present in the batch as empty images
+ # (filled with -1, i.e. zeros in the original [0, 1] range after the [-1, 1] rescaling).
+ for num_empty_cameras in range(len(missing_img_keys)):
+ if num_empty_cameras >= self.config.empty_cameras:
+ break
+ img = torch.ones_like(img) * -1
+ mask = torch.zeros_like(mask)
+ images.append(img)
+ img_masks.append(mask)
+
+ return images, img_masks
+
+ def prepare_language(self, batch) -> tuple[Tensor, Tensor]:
+ """Tokenize the text input"""
+ device = batch[OBS_STATE].device
+ tasks = batch["task"]
+
+ # PaliGemma prompt has to end with a new line
+ tasks = [task if task.endswith("\n") else f"{task}\n" for task in tasks]
+
+ tokenized_prompt = self.language_tokenizer(
+ tasks,
+ padding="max_length",
+ padding_side="right",
+ max_length=self.config.tokenizer_max_length,
+ return_tensors="pt",
+ )
+ lang_tokens = tokenized_prompt["input_ids"].to(device=device)
+ lang_masks = tokenized_prompt["attention_mask"].to(device=device, dtype=torch.bool)
+
+ return lang_tokens, lang_masks
+
+ def _pi_aloha_decode_state(self, state):
+ # Flip the joints.
+ for motor_idx in [1, 2, 8, 9]:
+ state[:, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ state[:, motor_idx] = aloha_gripper_to_angular(state[:, motor_idx])
+ return state
+
+ def _pi_aloha_encode_actions(self, actions):
+ # Flip the joints.
+ for motor_idx in [1, 2, 8, 9]:
+ actions[:, :, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ actions[:, :, motor_idx] = aloha_gripper_from_angular(actions[:, :, motor_idx])
+ return actions
+
+ def _pi_aloha_encode_actions_inv(self, actions):
+ # Flip the joints again.
+ for motor_idx in [1, 2, 8, 9]:
+ actions[:, :, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ actions[:, :, motor_idx] = aloha_gripper_from_angular_inv(actions[:, :, motor_idx])
+ return actions
+
+ def prepare_state(self, batch):
+ """Pad state"""
+ state = pad_vector(batch[OBS_STATE], self.config.max_state_dim)
+ return state
+
+ def prepare_action(self, batch):
+ """Pad action"""
+ actions = pad_vector(batch[ACTION], self.config.max_action_dim)
+ return actions
+
+
+class PI0FlowMatching(nn.Module):
+ """
+ π0: A Vision-Language-Action Flow Model for General Robot Control
+
+ [Paper](https://www.physicalintelligence.company/download/pi0.pdf)
+ [Jax code](https://github.com/Physical-Intelligence/openpi)
+
+ Designed by Physical Intelligence. Ported from Jax by Hugging Face.
+ ┌──────────────────────────────┐
+ │ actions │
+ │ ▲ │
+ │ ┌┴─────┐ │
+ │ kv cache │Gemma │ │
+ │ ┌──────────►│Expert│ │
+ │ │ │ │ │
+ │ ┌┴────────┐ │x 10 │ │
+ │ │ │ └▲──▲──┘ │
+ │ │PaliGemma│ │ │ │
+ │ │ │ │ robot state │
+ │ │ │ noise │
+ │ └▲──▲─────┘ │
+ │ │ │ │
+ │ │ image(s) │
+ │ language tokens │
+ └──────────────────────────────┘
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ paligemma_with_expert_config = PaliGemmaWithExpertConfig(
+ freeze_vision_encoder=self.config.freeze_vision_encoder,
+ train_expert_only=self.config.train_expert_only,
+ attention_implementation=self.config.attention_implementation,
+ )
+ self.paligemma_with_expert = PaliGemmaWithExpertModel(paligemma_with_expert_config)
+
+ # Projections are float32
+ self.state_proj = nn.Linear(self.config.max_state_dim, self.config.proj_width)
+ self.action_in_proj = nn.Linear(self.config.max_action_dim, self.config.proj_width)
+ self.action_out_proj = nn.Linear(self.config.proj_width, self.config.max_action_dim)
+
+ self.action_time_mlp_in = nn.Linear(self.config.proj_width * 2, self.config.proj_width)
+ self.action_time_mlp_out = nn.Linear(self.config.proj_width, self.config.proj_width)
+
+ self.set_requires_grad()
+
+ def set_requires_grad(self):
+ for params in self.state_proj.parameters():
+ params.requires_grad = self.config.train_state_proj
+
+ def sample_noise(self, shape, device):
+ noise = torch.normal(
+ mean=0.0,
+ std=1.0,
+ size=shape,
+ dtype=torch.float32,
+ device=device,
+ )
+ return noise
+
+ def sample_time(self, bsize, device):
+ time_beta = sample_beta(1.5, 1.0, bsize, device)
+ time = time_beta * 0.999 + 0.001
+ return time.to(dtype=torch.float32, device=device)
+
+ def embed_prefix(
+ self, images, img_masks, lang_tokens, lang_masks
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Embed images with SigLIP and language tokens with embedding layer to prepare
+ for PaliGemma transformer processing.
+ """
+ # TODO: avoid list in python and torch.cat ; prefer pre-allocation with torch.empty
+ embs = []
+ pad_masks = []
+ att_masks = []
+
+ # TODO: remove for loop
+ for (
+ img,
+ img_mask,
+ ) in zip(images, img_masks, strict=False):
+ img_emb = self.paligemma_with_expert.embed_image(img)
+ img_emb = img_emb.to(dtype=torch.bfloat16)
+
+ # Normalize image embeddings
+ img_emb_dim = img_emb.shape[-1]
+ img_emb = img_emb * torch.tensor(img_emb_dim**0.5, dtype=img_emb.dtype, device=img_emb.device)
+
+ bsize, num_img_embs = img_emb.shape[:2]
+ img_mask = img_mask[:, None].expand(bsize, num_img_embs)
+
+ embs.append(img_emb)
+ pad_masks.append(img_mask)
+
+ # Create attention masks so that image tokens attend to each other
+ att_masks += [0] * num_img_embs
+
+ lang_emb = self.paligemma_with_expert.embed_language_tokens(lang_tokens)
+
+ # Normalize language embeddings
+ lang_emb_dim = lang_emb.shape[-1]
+ lang_emb = lang_emb * math.sqrt(lang_emb_dim)
+
+ embs.append(lang_emb)
+ pad_masks.append(lang_masks)
+
+ # full attention between image and language inputs
+ num_lang_embs = lang_emb.shape[1]
+ att_masks += [0] * num_lang_embs
+
+ embs = torch.cat(embs, dim=1)
+ pad_masks = torch.cat(pad_masks, dim=1)
+ att_masks = torch.tensor(att_masks, dtype=torch.bool, device=pad_masks.device)
+ att_masks = att_masks[None, :].expand(bsize, len(att_masks))
+
+ return embs, pad_masks, att_masks
+
+ def embed_suffix(self, state, noisy_actions, timestep):
+ """Embed state, noisy_actions, timestep to prepare for Expert Gemma processing."""
+ embs = []
+ pad_masks = []
+ att_masks = []
+
+ # Embed state
+ state_emb = self.state_proj(state)
+ state_emb = state_emb.to(dtype=torch.bfloat16)
+ embs.append(state_emb[:, None, :])
+ bsize = state_emb.shape[0]
+ dtype = state_emb.dtype
+ device = state_emb.device
+
+ state_mask = torch.ones(bsize, 1, dtype=torch.bool, device=device)
+ pad_masks.append(state_mask)
+
+ # Set attention masks so that image and language inputs do not attend to state or actions
+ att_masks += [1]
+
+ # Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
+ time_emb = create_sinusoidal_pos_embedding(
+ timestep, self.config.proj_width, min_period=4e-3, max_period=4.0, device=device
+ )
+ time_emb = time_emb.type(dtype=dtype)
+
+ # Fuse timestep + action information using an MLP
+ action_emb = self.action_in_proj(noisy_actions)
+
+ time_emb = time_emb[:, None, :].expand_as(action_emb)
+ action_time_emb = torch.cat([action_emb, time_emb], dim=2)
+
+ action_time_emb = self.action_time_mlp_in(action_time_emb)
+ action_time_emb = F.silu(action_time_emb) # swish == silu
+ action_time_emb = self.action_time_mlp_out(action_time_emb)
+
+ # Add to input tokens
+ embs.append(action_time_emb)
+
+ bsize, action_time_dim = action_time_emb.shape[:2]
+ action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=device)
+ pad_masks.append(action_time_mask)
+
+ # Set attention masks so that image, language and state inputs do not attend to action tokens
+ att_masks += [1] + ([0] * (self.config.n_action_steps - 1))
+
+ embs = torch.cat(embs, dim=1)
+ pad_masks = torch.cat(pad_masks, dim=1)
+ att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
+ att_masks = att_masks[None, :].expand(bsize, len(att_masks))
+
+ return embs, pad_masks, att_masks
+
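+ # Note (added for clarity, mirroring the 0/1 convention used above and in `embed_prefix`):
+ # `make_att_2d_masks` treats `att_masks` as a block pattern where a 1 opens a new block and
+ # a 0 keeps the token in the current one, so the resulting layout is:
+ #   [images + language] -> one bidirectional block that cannot see state or actions,
+ #   [state]             -> a new block that sees the prefix and itself,
+ #   [action tokens]     -> a final block that sees everything (prefix, state, all actions).
+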
+ def forward(
+ self, images, img_masks, lang_tokens, lang_masks, state, actions, noise=None, time=None
+ ) -> Tensor:
+ """Do a full training forward pass and compute the loss (batch_size x num_steps x num_motors)"""
+ if noise is None:
+ noise = self.sample_noise(actions.shape, actions.device)
+
+ if time is None:
+ time = self.sample_time(actions.shape[0], actions.device)
+
+ time_expanded = time[:, None, None]
+ x_t = time_expanded * noise + (1 - time_expanded) * actions
+ u_t = noise - actions
+
+ prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
+ images, img_masks, lang_tokens, lang_masks
+ )
+ suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(state, x_t, time)
+
+ pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
+ att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)
+
+ att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
+ position_ids = torch.cumsum(pad_masks, dim=1) - 1
+
+ (_, suffix_out), _ = self.paligemma_with_expert.forward(
+ attention_mask=att_2d_masks,
+ position_ids=position_ids,
+ past_key_values=None,
+ inputs_embeds=[prefix_embs, suffix_embs],
+ use_cache=False,
+ fill_kv_cache=False,
+ )
+ suffix_out = suffix_out[:, -self.config.n_action_steps :]
+ # Original openpi code, upcast attention output
+ suffix_out = suffix_out.to(dtype=torch.float32)
+ v_t = self.action_out_proj(suffix_out)
+
+ losses = F.mse_loss(u_t, v_t, reduction="none")
+ return losses
+
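+ # Note (added for clarity): this is the conditional flow-matching objective. With
+ # x_t = t * noise + (1 - t) * actions, the velocity along the straight path from actions
+ # (t=0) to noise (t=1) is dx_t/dt = noise - actions = u_t, so the MSE above trains the
+ # network to predict that velocity; integrating it backwards from t=1 to t=0 at inference
+ # time turns a noise sample back into an action chunk.
+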
+ def sample_actions(self, images, img_masks, lang_tokens, lang_masks, state, noise=None) -> Tensor:
+ """Do a full inference forward and compute the action (batch_size x num_steps x num_motors)"""
+ bsize = state.shape[0]
+ device = state.device
+
+ if noise is None:
+ actions_shape = (bsize, self.config.n_action_steps, self.config.max_action_dim)
+ noise = self.sample_noise(actions_shape, device)
+
+ prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
+ images, img_masks, lang_tokens, lang_masks
+ )
+ prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
+ prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1
+
+ # Compute image and language key value cache
+ _, past_key_values = self.paligemma_with_expert.forward(
+ attention_mask=prefix_att_2d_masks,
+ position_ids=prefix_position_ids,
+ past_key_values=None,
+ inputs_embeds=[prefix_embs, None],
+ use_cache=self.config.use_cache,
+ fill_kv_cache=True,
+ )
+
+ dt = -1.0 / self.config.num_steps
+ dt = torch.tensor(dt, dtype=torch.float32, device=device)
+
+ x_t = noise
+ time = torch.tensor(1.0, dtype=torch.float32, device=device)
+ while time >= -dt / 2:
+ expanded_time = time.expand(bsize)
+ v_t = self.denoise_step(
+ state,
+ prefix_pad_masks,
+ past_key_values,
+ x_t,
+ expanded_time,
+ )
+
+ # Euler step
+ x_t += dt * v_t
+ time += dt
+ return x_t
+
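+ # Illustrative walk-through (not part of the original code): with e.g. `num_steps=10`,
+ # dt = -0.1 and the loop above evaluates the velocity at t = 1.0, 0.9, ..., 0.1 (the
+ # `-dt / 2` margin absorbs floating point drift), applying x_t <- x_t + dt * v_t each
+ # time, so x_t moves from pure noise at t=1 to the action chunk estimate at t~=0 in
+ # exactly 10 Euler steps.
+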
+ def denoise_step(
+ self,
+ state,
+ prefix_pad_masks,
+ past_key_values,
+ x_t,
+ timestep,
+ ):
+ """Apply one denoising step of the noise `x_t` at a given timestep."""
+ suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(state, x_t, timestep)
+
+ suffix_len = suffix_pad_masks.shape[1]
+ batch_size = prefix_pad_masks.shape[0]
+ prefix_len = prefix_pad_masks.shape[1]
+ prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)
+
+ suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)
+
+ full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)
+
+ prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
+ position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1
+
+ outputs_embeds, _ = self.paligemma_with_expert.forward(
+ attention_mask=full_att_2d_masks,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=[None, suffix_embs],
+ use_cache=self.config.use_cache,
+ fill_kv_cache=False,
+ )
+ suffix_out = outputs_embeds[1]
+ suffix_out = suffix_out[:, -self.config.n_action_steps :]
+ suffix_out = suffix_out.to(dtype=torch.float32)
+ v_t = self.action_out_proj(suffix_out)
+ return v_t
diff --git a/src/lerobot/policies/pi0/paligemma_with_expert.py b/src/lerobot/policies/pi0/paligemma_with_expert.py
new file mode 100644
index 0000000000..edc34b7c56
--- /dev/null
+++ b/src/lerobot/policies/pi0/paligemma_with_expert.py
@@ -0,0 +1,420 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import torch
+import torch.version
+from transformers.cache_utils import Cache
+from torch import nn
+from transformers import (
+ AutoConfig,
+ GemmaForCausalLM,
+ PaliGemmaForConditionalGeneration,
+ PretrainedConfig,
+ PreTrainedModel,
+)
+from transformers.models.auto import CONFIG_MAPPING
+
+from lerobot.policies.pi0.flex_attention import flex_attention_forward
+
+
+def apply_rope(x, positions, max_wavelength=10_000):
+ """
+ Applies RoPE positions [B, L] to x [B, L, H, D].
+ """
+ d_half = x.shape[-1] // 2
+ device = x.device
+ dtype = x.dtype
+ x = x.to(torch.float32)
+
+ freq_exponents = (2.0 / x.shape[-1]) * torch.arange(d_half, dtype=torch.float32, device=device)
+ timescale = max_wavelength**freq_exponents
+ radians = positions[..., None].to(torch.float32) / timescale[None, None, :].to(torch.float32)
+
+ radians = radians[..., None, :]
+
+ sin = torch.sin(radians) # .to(dtype=dtype)
+ cos = torch.cos(radians) # .to(dtype=dtype)
+
+ x1, x2 = x.split(d_half, dim=-1)
+ res = torch.empty_like(x)
+ res[..., :d_half] = x1 * cos - x2 * sin
+ res[..., d_half:] = x2 * cos + x1 * sin
+
+ return res.to(dtype)
+
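+# Note (added for clarity): `apply_rope` rotates each (x1, x2) half-pair of the head
+# dimension by an angle that grows with the position index. At position 0 the angle is 0
+# (cos=1, sin=0), so the embedding passes through unchanged, e.g. for x of shape
+# (B, L, H, D) and positions = torch.zeros(B, L, dtype=torch.long),
+# torch.allclose(apply_rope(x, positions), x) holds up to the float32 round trip.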
+
+class PaliGemmaWithExpertConfig(PretrainedConfig):
+ model_type = "PaliGemmaWithExpertModel"
+ sub_configs = {"paligemma_config": AutoConfig, "gemma_expert_config": AutoConfig}
+
+ def __init__(
+ self,
+ paligemma_config: dict | None = None,
+ gemma_expert_config: dict | None = None,
+ freeze_vision_encoder: bool = True,
+ train_expert_only: bool = True,
+ attention_implementation: str = "eager",
+ **kwargs,
+ ):
+ self.freeze_vision_encoder = freeze_vision_encoder
+ self.train_expert_only = train_expert_only
+ self.attention_implementation = attention_implementation
+
+ if paligemma_config is None:
+ # Default config from Pi0
+ self.paligemma_config = CONFIG_MAPPING["paligemma"](
+ transformers_version="4.48.1",
+ _vocab_size=257152,
+ bos_token_id=2,
+ eos_token_id=1,
+ hidden_size=2048,
+ image_token_index=257152,
+ model_type="paligemma",
+ pad_token_id=0,
+ projection_dim=2048,
+ text_config={
+ "hidden_activation": "gelu_pytorch_tanh",
+ "hidden_size": 2048,
+ "intermediate_size": 16384,
+ "model_type": "gemma",
+ "num_attention_heads": 8,
+ "num_hidden_layers": 18,
+ "num_image_tokens": 256,
+ "num_key_value_heads": 1,
+ "torch_dtype": "float32",
+ "vocab_size": 257152,
+ },
+ vision_config={
+ "hidden_size": 1152,
+ "intermediate_size": 4304,
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "num_image_tokens": 256,
+ "patch_size": 14,
+ "projection_dim": 2048,
+ "projector_hidden_act": "gelu_fast",
+ "torch_dtype": "float32",
+ "vision_use_head": False,
+ },
+ )
+ elif isinstance(paligemma_config, dict):
+ # Override Pi0 default config for PaliGemma
+ if "model_type" not in paligemma_config:
+ paligemma_config["model_type"] = "paligemma"
+
+ cfg_cls = CONFIG_MAPPING[paligemma_config["model_type"]]
+ self.paligemma_config = cfg_cls(**paligemma_config)
+
+ if gemma_expert_config is None:
+ # Default config from Pi0
+ self.gemma_expert_config = CONFIG_MAPPING["gemma"](
+ attention_bias=False,
+ attention_dropout=0.0,
+ bos_token_id=2,
+ eos_token_id=1,
+ head_dim=256,
+ hidden_act="gelu_pytorch_tanh",
+ hidden_activation="gelu_pytorch_tanh",
+ hidden_size=1024,
+ initializer_range=0.02,
+ intermediate_size=4096,
+ max_position_embeddings=8192,
+ model_type="gemma",
+ num_attention_heads=8,
+ num_hidden_layers=18,
+ num_key_value_heads=1,
+ pad_token_id=0,
+ rms_norm_eps=1e-06,
+ rope_theta=10000.0,
+ torch_dtype="float32",
+ transformers_version="4.48.1",
+ use_cache=True,
+ vocab_size=257152,
+ )
+ elif isinstance(gemma_expert_config, dict):
+ # Override Pi0 default config for Gemma Expert
+ if "model_type" not in gemma_expert_config:
+ gemma_expert_config["model_type"] = "gemma"
+
+ cfg_cls = CONFIG_MAPPING[gemma_expert_config["model_type"]]
+ self.gemma_expert_config = cfg_cls(**gemma_expert_config)
+
+ super().__init__(**kwargs)
+
+ def __post_init__(self):
+ super().__post_init__()
+ if self.train_expert_only and not self.freeze_vision_encoder:
+ raise ValueError(
+ "You set `freeze_vision_encoder=False` and `train_expert_only=True` which are not compatible."
+ )
+
+ if self.attention_implementation not in ["eager", "fa2", "flex"]:
+ raise ValueError(
+ f"Wrong value provided for `attention_implementation` ({self.attention_implementation}). Expected 'eager', 'fa2' or 'flex'."
+ )
+
+
+class PaliGemmaWithExpertModel(PreTrainedModel):
+ config_class = PaliGemmaWithExpertConfig
+
+ def __init__(self, config: PaliGemmaWithExpertConfig):
+ super().__init__(config=config)
+ self.config = config
+ self.paligemma = PaliGemmaForConditionalGeneration(config=config.paligemma_config)
+ self.gemma_expert = GemmaForCausalLM(config=config.gemma_expert_config)
+ # Remove unused embed_tokens
+ self.gemma_expert.model.embed_tokens = None
+
+ self.to_bfloat16_like_physical_intelligence()
+ self.set_requires_grad()
+
+ def set_requires_grad(self):
+ if self.config.freeze_vision_encoder:
+ self.paligemma.vision_tower.eval()
+ for params in self.paligemma.vision_tower.parameters():
+ params.requires_grad = False
+
+ if self.config.train_expert_only:
+ self.paligemma.eval()
+ for params in self.paligemma.parameters():
+ params.requires_grad = False
+
+ def train(self, mode: bool = True):
+ super().train(mode)
+
+ if self.config.freeze_vision_encoder:
+ self.paligemma.vision_tower.eval()
+
+ if self.config.train_expert_only:
+ self.paligemma.eval()
+
+ def to_bfloat16_like_physical_intelligence(self):
+ self.paligemma = self.paligemma.to(dtype=torch.bfloat16)
+
+ params_to_change_dtype = [
+ "language_model.model.layers",
+ "gemma_expert.model.layers",
+ "vision_tower",
+ "multi_modal",
+ ]
+ for name, param in self.named_parameters():
+ if any(selector in name for selector in params_to_change_dtype):
+ param.data = param.data.to(dtype=torch.bfloat16)
+
+ def embed_image(self, image: torch.Tensor):
+ # Handle different transformers versions
+ if hasattr(self.paligemma, "get_image_features"):
+ return self.paligemma.get_image_features(image)
+ else:
+ return self.paligemma.model.get_image_features(image)
+
+ def embed_language_tokens(self, tokens: torch.Tensor):
+ return self.paligemma.language_model.embed_tokens(tokens)
+
+ # TODO: break down this huge forward into modules or functions
+ def forward(
+ self,
+ attention_mask: torch.Tensor | None = None,
+ position_ids: torch.LongTensor | None = None,
+ past_key_values: list[torch.FloatTensor] | Cache | None = None,
+ inputs_embeds: list[torch.FloatTensor] = None,
+ use_cache: bool | None = None,
+ fill_kv_cache: bool | None = None,
+ ):
+ models = [self.paligemma.language_model, self.gemma_expert.model]
+
+ for hidden_states in inputs_embeds:
+ # TODO this is very inefficient
+ # dtype is always the same, batch size too (if > 1 len)
+ # device could be trickier in multi gpu edge cases but that's it
+ if hidden_states is None:
+ continue
+ batch_size = hidden_states.shape[0]
+
+ # RMSNorm
+ num_layers = self.paligemma.config.text_config.num_hidden_layers
+ head_dim = self.paligemma.config.text_config.head_dim
+ for layer_idx in range(num_layers):
+ query_states = []
+ key_states = []
+ value_states = []
+ for i, hidden_states in enumerate(inputs_embeds):
+ if hidden_states is None:
+ continue
+ layer = models[i].layers[layer_idx]
+ # normalizer = torch.tensor(models[i].config.hidden_size**0.5, dtype=hidden_states.dtype)
+ # hidden_states = hidden_states * normalizer
+ hidden_states = layer.input_layernorm(hidden_states)
+
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, layer.self_attn.head_dim)
+
+ hidden_states = hidden_states.to(dtype=torch.bfloat16)
+ query_state = layer.self_attn.q_proj(hidden_states).view(hidden_shape)
+ key_state = layer.self_attn.k_proj(hidden_states).view(hidden_shape)
+ value_state = layer.self_attn.v_proj(hidden_states).view(hidden_shape)
+
+ query_states.append(query_state)
+ key_states.append(key_state)
+ value_states.append(value_state)
+
+ # B,L,H,D with L sequence length, H number of heads, D head dim
+ # concatenate on the number of embeddings/tokens
+ query_states = torch.cat(query_states, dim=1)
+ key_states = torch.cat(key_states, dim=1)
+ value_states = torch.cat(value_states, dim=1)
+
+ query_states = apply_rope(query_states, position_ids)
+ key_states = apply_rope(key_states, position_ids)
+
+ if use_cache and past_key_values is None:
+ past_key_values = {}
+
+ if use_cache:
+ if fill_kv_cache:
+ past_key_values[layer_idx] = {
+ "key_states": key_states,
+ "value_states": value_states,
+ }
+ else:
+ # TODO here, some optimization can be done - similar to a `StaticCache` we can declare the `max_len` before.
+ # so we create an empty cache, with just one cuda malloc, and if (in autoregressive case) we reach
+ # the max len, then we (for instance) double the cache size. This implementation already exists
+ # in `transformers`. (molbap)
+ key_states = torch.cat([past_key_values[layer_idx]["key_states"], key_states], dim=1)
+ value_states = torch.cat(
+ [past_key_values[layer_idx]["value_states"], value_states], dim=1
+ )
+
+ attention_interface = self.get_attention_interface()
+ att_output = attention_interface(
+ attention_mask, batch_size, head_dim, query_states, key_states, value_states
+ )
+ att_output = att_output.to(dtype=torch.bfloat16)
+
+ # first part of att_output is prefix (up to sequence length, [:, 0:prefix_seq_len])
+ outputs_embeds = []
+ start = 0
+ for i, hidden_states in enumerate(inputs_embeds):
+ layer = models[i].layers[layer_idx]
+
+ if hidden_states is not None:
+ end = start + hidden_states.shape[1]
+
+ if att_output.dtype != layer.self_attn.o_proj.weight.dtype:
+ att_output = att_output.to(layer.self_attn.o_proj.weight.dtype)
+ out_emb = layer.self_attn.o_proj(att_output[:, start:end])
+
+ # TODO: first dropout (by default 0.0)
+
+ # first residual
+ out_emb += hidden_states
+ after_first_residual = out_emb.clone()
+
+ out_emb = layer.post_attention_layernorm(out_emb)
+ out_emb = layer.mlp(out_emb)
+
+ # TODO: second dropout (by default 0.0)
+
+ # second residual
+ out_emb += after_first_residual
+
+ outputs_embeds.append(out_emb)
+
+ start = end
+ else:
+ outputs_embeds.append(None)
+
+ inputs_embeds = outputs_embeds
+
+ # final norm
+ outputs_embeds = []
+ for i, hidden_states in enumerate(inputs_embeds):
+ if hidden_states is not None:
+ out_emb = models[i].norm(hidden_states)
+ outputs_embeds.append(out_emb)
+ else:
+ outputs_embeds.append(None)
+
+ return outputs_embeds, past_key_values
+
+ def get_attention_interface(self):
+ if self.config.attention_implementation == "fa2":
+ attention_interface = self.flash_attention_forward
+ elif self.config.attention_implementation == "flex":
+ attention_interface = flex_attention_forward
+ else:
+ attention_interface = self.eager_attention_forward
+ return attention_interface
+
+ def flash_attention_forward(
+ self, attention_mask, batch_size, head_dim, query_states, key_states, value_states
+ ):
+ raise NotImplementedError("FA2 is not implemented (yet)")
+
+ def eager_attention_forward(
+ self, attention_mask, batch_size, head_dim, query_states, key_states, value_states
+ ):
+ num_att_heads = self.config.paligemma_config.text_config.num_attention_heads
+ num_key_value_heads = self.config.paligemma_config.text_config.num_key_value_heads
+ num_key_value_groups = num_att_heads // num_key_value_heads
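+ # Note (added for clarity): this is grouped-query attention. With the Pi0 defaults used
+ # above (8 attention heads, 1 key/value head), num_key_value_groups == 8 and the single
+ # key/value head is broadcast to all 8 query heads by the expand/reshape below.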
+
+ # query_states: batch_size, sequence_length, num_att_head, head_dim
+ # key_states: batch_size, sequence_length, num_key_value_head, head_dim
+ # value_states: batch_size, sequence_length, num_key_value_head, head_dim
+ sequence_length = key_states.shape[1]
+
+ key_states = key_states[:, :, :, None, :].expand(
+ batch_size, sequence_length, num_key_value_heads, num_key_value_groups, head_dim
+ )
+ key_states = key_states.reshape(
+ batch_size, sequence_length, num_key_value_heads * num_key_value_groups, head_dim
+ )
+
+ value_states = value_states[:, :, :, None, :].expand(
+ batch_size, sequence_length, num_key_value_heads, num_key_value_groups, head_dim
+ )
+ value_states = value_states.reshape(
+ batch_size, sequence_length, num_key_value_heads * num_key_value_groups, head_dim
+ )
+
+ # Attention here is upcasted to float32 to match the original eager implementation.
+
+ query_states = query_states.to(dtype=torch.float32)
+ key_states = key_states.to(dtype=torch.float32)
+
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+
+ att_weights = torch.matmul(query_states, key_states.transpose(2, 3))
+ att_weights *= head_dim**-0.5
+ big_neg = -2.3819763e38 # See gemma/modules.py
+
+ masked_att_weights = torch.where(attention_mask[:, None, :, :], att_weights, big_neg)
+
+ probs = nn.functional.softmax(masked_att_weights, dim=-1)
+ probs = probs.to(dtype=value_states.dtype)
+
+ # probs: batch_size, num_att_heads, sequence_length, sequence_length
+ # value_states: batch_size, sequence_length, num_att_heads, head_dim
+
+ att_output = torch.matmul(probs, value_states.permute(0, 2, 1, 3))
+
+ att_output = att_output.permute(0, 2, 1, 3)
+ # we use -1 because sequence length can change
+ att_output = att_output.reshape(batch_size, -1, num_key_value_heads * num_key_value_groups * head_dim)
+
+ return att_output
diff --git a/src/lerobot/policies/pi0fast/configuration_pi0fast.py b/src/lerobot/policies/pi0fast/configuration_pi0fast.py
new file mode 100644
index 0000000000..b72bcd7359
--- /dev/null
+++ b/src/lerobot/policies/pi0fast/configuration_pi0fast.py
@@ -0,0 +1,136 @@
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
+from lerobot.optim.optimizers import AdamWConfig
+from lerobot.optim.schedulers import (
+ CosineDecayWithWarmupSchedulerConfig,
+)
+
+
+@PreTrainedConfig.register_subclass("pi0fast")
+@dataclass
+class PI0FASTConfig(PreTrainedConfig):
+ # Input / output structure.
+ n_obs_steps: int = 1
+ chunk_size: int = 10
+ n_action_steps: int = 5
+
+ normalization_mapping: dict[str, NormalizationMode] = field(
+ default_factory=lambda: {
+ "VISUAL": NormalizationMode.IDENTITY,
+ "STATE": NormalizationMode.MEAN_STD,
+ "ACTION": NormalizationMode.MEAN_STD,
+ }
+ )
+
+ # Shorter state and action vectors will be padded
+ max_state_dim: int = 32 # 32
+ max_action_dim: int = 32 # 32
+
+ # Image preprocessing
+ resize_imgs_with_padding: tuple[int, int] = (224, 224)
+ interpolate_like_pi: bool = False
+
+ # Add empty images. Used by pi0_aloha_sim which adds the empty
+ # left and right wrist cameras in addition to the top camera.
+ empty_cameras: int = 0
+
+ # Converts the joint and gripper values from the standard Aloha space to
+ # the space used by the pi internal runtime which was used to train the base model.
+ adapt_to_pi_aloha: bool = False
+
+ # Converts joint dimensions to deltas with respect to the current state before passing to the model.
+ # Gripper dimensions will remain in absolute values.
+ use_delta_joint_actions_aloha: bool = False
+
+ # Tokenizer
+ tokenizer_max_length: int = 48
+
+ # Projector
+ proj_width: int = 1024
+
+ # Decoding
+ max_decoding_steps: int = 256
+ fast_skip_tokens: int = 128 # Skip last 128 tokens in PaliGemma vocab since they are special tokens
+ max_input_seq_len: int = 256 # 512
+
+ # Utils
+ use_cache: bool = True
+
+ # Frozen parameters
+ freeze_vision_encoder: bool = True
+ freeze_lm_head: bool = True
+
+ # Training presets
+ optimizer_lr: float = 1e-4
+ optimizer_betas: tuple[float, float] = (0.9, 0.95)
+ optimizer_eps: float = 1e-8
+ optimizer_weight_decay: float = 1e-5
+
+ scheduler_warmup_steps: int = 1_000
+ scheduler_decay_steps: int = 30_000
+ scheduler_decay_lr: float = 2.5e-6
+
+ checkpoint_path: str | None = None
+
+ padding_side: str = "right"
+
+ precision: str = "bfloat16"
+ grad_clip_norm: float = 1
+
+ # Allows padding/truncation of generated action tokens during detokenization to ensure decoding.
+ # In the original version, tensors of 0s were generated if shapes didn't match for stable decoding.
+ relaxed_action_decoding: bool = True
+
+ def __post_init__(self):
+ """Input validation (not exhaustive)."""
+ super().__post_init__()
+
+ if self.n_action_steps > self.chunk_size:
+ raise ValueError(
+ f"The chunk size is the upper bound for the number of action steps per model invocation. Got "
+ f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`."
+ )
+ if self.n_obs_steps != 1:
+ raise ValueError(
+ f"Multiple observation steps not handled yet. Got `nobs_steps={self.n_obs_steps}`"
+ )
+
+ def validate_features(self) -> None:
+ for i in range(self.empty_cameras):
+ key = f"observation.images.empty_camera_{i}"
+ empty_camera = PolicyFeature(
+ type=FeatureType.VISUAL,
+ shape=(3, 480, 640),
+ )
+ self.input_features[key] = empty_camera
+
+ def get_optimizer_preset(self) -> AdamWConfig:
+ return AdamWConfig(
+ lr=self.optimizer_lr,
+ betas=self.optimizer_betas,
+ eps=self.optimizer_eps,
+ weight_decay=self.optimizer_weight_decay,
+ grad_clip_norm=self.grad_clip_norm,
+ )
+
+ def get_scheduler_preset(self):
+ return CosineDecayWithWarmupSchedulerConfig(
+ peak_lr=self.optimizer_lr,
+ decay_lr=self.scheduler_decay_lr,
+ num_warmup_steps=self.scheduler_warmup_steps,
+ num_decay_steps=self.scheduler_decay_steps,
+ )
+
+ @property
+ def observation_delta_indices(self) -> None:
+ return None
+
+ @property
+ def action_delta_indices(self) -> list:
+ return list(range(self.chunk_size))
+
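+ # Note (added for clarity): returning range(chunk_size) tells the LeRobot dataloader to
+ # attach the actions at relative offsets 0..chunk_size-1 to every frame, so with the
+ # default chunk_size=10 each training sample carries a 10-step action chunk.
+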
+ @property
+ def reward_delta_indices(self) -> None:
+ return None
diff --git a/src/lerobot/policies/pi0fast/modeling_pi0fast.py b/src/lerobot/policies/pi0fast/modeling_pi0fast.py
new file mode 100644
index 0000000000..0e53bd3497
--- /dev/null
+++ b/src/lerobot/policies/pi0fast/modeling_pi0fast.py
@@ -0,0 +1,982 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+π0+FAST: Efficient Action Tokenization for Vision-Language-Action Models
+
+[Paper](https://huggingface.co/papers/2501.09747)
+[Jax code](https://github.com/Physical-Intelligence/openpi)
+
+Designed by Physical Intelligence. Ported from Jax by Hugging Face.
+
+Example of finetuning the pi0+FAST pretrained model (`pi0_fast_base` in `openpi`):
+```bash
+python -m lerobot.scripts.train \
+--policy.path=lerobot/pi0fast_base \
+--dataset.repo_id=danaaubakirova/koch_test
+```
+
+Example of training the pi0+FAST neural network from scratch:
+```bash
+python -m lerobot.scripts.train \
+--policy.type=pi0fast \
+--dataset.repo_id=danaaubakirova/koch_test
+```
+
+Example of using the pi0+FAST pretrained model outside the LeRobot training framework:
+```python
+policy = PI0FASTPolicy.from_pretrained("lerobot/pi0fast_base")
+```
+
+"""
+
+from collections import deque
+from functools import partial
+
+import numpy as np
+import torch
+import torch.nn.functional as F # noqa: N812
+from PIL import Image
+from scipy.fft import idct
+from torch import Tensor, nn
+from transformers import AutoProcessor, AutoTokenizer, PaliGemmaForConditionalGeneration
+from transformers.cache_utils import HybridCache, StaticCache
+from transformers.models.auto import CONFIG_MAPPING
+
+from lerobot.constants import ACTION, OBS_STATE
+from lerobot.policies.normalize import Normalize, Unnormalize
+from lerobot.policies.pi0fast.configuration_pi0fast import PI0FASTConfig
+from lerobot.policies.pretrained import PreTrainedPolicy
+
+PRECISION = {
+ "float16": torch.float16,
+ "float32": torch.float32,
+ "bfloat16": torch.bfloat16,
+}
+
+
+def normalize(x, min_val, max_val):
+ return (x - min_val) / (max_val - min_val)
+
+
+def unnormalize(x, min_val, max_val):
+ return x * (max_val - min_val) + min_val
+
+
+def safe_arcsin(value):
+ # This ensures that the input stays within
+ # [−1,1] to avoid invalid values for arcsin
+ return torch.arcsin(torch.clamp(value, -1.0, 1.0))
+
+
+def aloha_gripper_to_angular(value):
+ # Aloha transforms the gripper positions into a linear space. The following code
+ # reverses this transformation to be consistent with pi0 which is pretrained in
+ # angular space.
+ #
+ # These values are coming from the Aloha code:
+ # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED
+ value = unnormalize(value, min_val=0.01844, max_val=0.05800)
+
+ # This is the inverse of the angular to linear transformation inside the Interbotix code.
+ def linear_to_radian(linear_position, arm_length, horn_radius):
+ value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position)
+ return safe_arcsin(value)
+
+ # The constants are taken from the Interbotix code.
+ value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022)
+
+ # Normalize to [0, 1].
+ # The values 0.4 and 1.5 were measured on an actual Trossen robot.
+ return normalize(value, min_val=0.4, max_val=1.5)
+
+
+def aloha_gripper_from_angular(value):
+ # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha.
+ # Note that the units are still angular but the range is different.
+
+ # The values 0.4 and 1.5 were measured on an actual Trossen robot.
+ value = unnormalize(value, min_val=0.4, max_val=1.5)
+
+ # These values are coming from the Aloha code:
+ # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE
+ return normalize(value, min_val=-0.6213, max_val=1.4910)
+
+
+def aloha_gripper_from_angular_inv(value):
+ # Directly inverts the gripper_from_angular function.
+ value = unnormalize(value, min_val=-0.6213, max_val=1.4910)
+ return normalize(value, min_val=0.4, max_val=1.5)
+
+
+class PI0FASTPolicy(PreTrainedPolicy):
+ """Wrapper class around PI0FAST tokenizer and model to train and run inference within LeRobot."""
+
+ config_class = PI0FASTConfig
+ name = "pi0fast"
+
+ def __init__(
+ self,
+ config: PI0FASTConfig,
+ dataset_stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ """
+ Args:
+ config: Policy configuration class instance or None, in which case the default instantiation of
+ the configuration class is used.
+ dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
+ that they will be passed with a call to `load_state_dict` before the policy is used.
+ """
+
+ super().__init__(config)
+ config.validate_features()
+ self.config = config
+
+ self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
+ self.normalize_targets = Normalize(
+ config.output_features, config.normalization_mapping, dataset_stats
+ )
+ self.unnormalize_outputs = Unnormalize(
+ config.output_features, config.normalization_mapping, dataset_stats
+ )
+
+ self.language_tokenizer = AutoProcessor.from_pretrained("google/paligemma-3b-pt-224")
+ self.model = PI0FAST(config)
+
+ self.reset()
+
+ def reset(self):
+ """This should be called whenever the environment is reset."""
+ self._action_queue = deque([], maxlen=self.config.n_action_steps)
+
+ def get_optim_params(self) -> dict:
+ return self.parameters()
+
+ def _pi_aloha_decode_state(self, state):
+ # Flip the joints.
+ for motor_idx in [1, 2, 8, 9]:
+ state[:, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ state[:, motor_idx] = aloha_gripper_to_angular(state[:, motor_idx])
+ return state
+
+ def _pi_aloha_encode_actions(self, actions):
+ # Flip the joints.
+ for motor_idx in [1, 2, 8, 9]:
+ actions[:, :, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ actions[:, :, motor_idx] = aloha_gripper_from_angular(actions[:, :, motor_idx])
+ return actions
+
+ def _pi_aloha_encode_actions_inv(self, actions):
+ # Flip the joints again.
+ for motor_idx in [1, 2, 8, 9]:
+ actions[:, :, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ actions[:, :, motor_idx] = aloha_gripper_from_angular_inv(actions[:, :, motor_idx])
+ return actions
+
+ @torch.no_grad()
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ """Predict a chunk of actions given environment observations."""
+ raise NotImplementedError("Currently not implemented for PI0FAST")
+
+ @torch.no_grad()
+ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
+ """Select a single action given environment observations.
+
+ This method wraps `select_actions` in order to return one action at a time for execution in the
+ environment. It works by managing the actions in a queue and only calling `select_actions` when the
+ queue is empty.
+ """
+ self.eval()
+
+ if self.config.adapt_to_pi_aloha:
+ batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
+
+ batch = self.normalize_inputs(batch)
+
+ # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
+ # querying the policy.
+ if len(self._action_queue) == 0:
+ actions = self.model.generate_actions(batch)
+
+ actions = actions[:, : self.config.n_action_steps]
+
+ original_action_dim = self.config.action_feature.shape[0]
+ actions = actions[:, :, :original_action_dim]
+
+ actions = self.unnormalize_outputs({"action": actions})["action"]
+
+ if self.config.adapt_to_pi_aloha:
+ actions = self._pi_aloha_encode_actions(actions)
+
+ # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
+ # effectively has shape (n_action_steps, batch_size, *), hence the transpose.
+ self._action_queue.extend(actions.transpose(0, 1))
+ return self._action_queue.popleft()
+
+ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+ if self.config.adapt_to_pi_aloha:
+ batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
+ batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])
+ batch = self.normalize_inputs(batch)
+ batch = self.normalize_targets(batch)
+ loss_dict = self.model.forward(batch)
+ return loss_dict["loss"], loss_dict
+
+
+def block_causal_update_causal_mask(
+ attention_mask,
+ token_type_ids=None,
+ past_key_values=None,
+ cache_position=None,
+ input_tensor=None,
+ attn_implementation: str = "eager",
+ dtype: torch.dtype = "float32",
+):
+ """
+ Update the causal mask during training and generation. It can be customized to different attention masks.
+ """
+ if attn_implementation == "flash_attention_2":
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+ using_static_cache = isinstance(past_key_values, StaticCache)
+ min_dtype = torch.finfo(dtype).min
+
+ if input_tensor is None:
+ input_tensor = attention_mask
+
+ inputs_lead_dim, sequence_length = input_tensor.shape[:2]
+
+ if using_static_cache or isinstance(past_key_values, HybridCache):
+ target_length = past_key_values.get_max_cache_shape()
+ else:
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else cache_position[0] + sequence_length + 1
+ )
+
+ # Handle precomputed attention masks
+ if attention_mask is not None and attention_mask.dim() == 4:
+ return attention_mask
+
+ # Causal mask initialization
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
+ )
+
+ # Standard causal masking (triu ensures tokens can only attend to past)
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+
+ # Apply block causal mask
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.to(causal_mask.device).bool()
+ cumsum = torch.cumsum(token_type_ids, dim=1)
+ block_causal_mask = cumsum[:, None, :] <= cumsum[:, :, None]
+
+ # Combine causal_mask with block-wise attention mask
+ causal_mask = torch.where(block_causal_mask, 0.0, causal_mask)
+ causal_mask = causal_mask[:, None, :, :]
+ else:
+ # Apply past cache position constraint
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
+ -1, 1
+ )
+ causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1)
+ else:
+ # Apply past cache position constraint
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
+ -1, 1
+ )
+ causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1)
+
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # Copy to contiguous memory for in-place edits
+ mask_length = attention_mask.shape[-1]
+
+ # Apply padding mask
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
+ causal_mask.device
+ )
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+
+ return causal_mask
+
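+# Illustrative example (not part of the original code) of the block structure built above:
+# with token_type_ids = [0, 0, 0, 1, 1, 1] (three prefix tokens, three suffix tokens), the
+# cumulative-sum comparison lets every prefix token attend to the whole prefix
+# bidirectionally, while each suffix token attends to the prefix plus the suffix tokens up
+# to and including itself, i.e. causal decoding on top of a fully visible prefix.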
+
+def prepare_inputs_for_generation(
+ # self,
+ input_ids,
+ past_key_values=None,
+ inputs_embeds=None,
+ cache_position=None,
+ position_ids=None,
+ pixel_values=None,
+ attention_mask=None,
+ token_type_ids=None,
+ use_cache=True,
+ num_logits_to_keep=None,
+ labels=None,
+ self=None,
+ **kwargs,
+):
+ # create block causal attention
+ if cache_position[0] > 0 and input_ids.shape[1] > 0:
+ input_tensor = input_ids[:, -1:]
+ new_positions = (
+ torch.ones(
+ (position_ids.shape[0], input_ids.shape[1]),
+ dtype=position_ids.dtype,
+ device=position_ids.device,
+ ).cumsum(-1)
+ + position_ids[:, -1:]
+ )
+ position_ids = torch.cat([position_ids, new_positions], dim=-1)
+ else:
+ input_tensor = inputs_embeds
+ attention_mask = block_causal_update_causal_mask(
+ attention_mask=attention_mask,
+ past_key_values=past_key_values,
+ cache_position=cache_position,
+ input_tensor=input_tensor,
+ token_type_ids=token_type_ids,
+ dtype=self.dtype,
+ attn_implementation=self.config.text_config._attn_implementation,
+ )
+ # Overwritten -- custom `position_ids` and `pixel_values` handling
+ model_inputs = self.language_model.prepare_inputs_for_generation(
+ input_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ cache_position=cache_position,
+ use_cache=use_cache,
+ num_logits_to_keep=num_logits_to_keep,
+ token_type_ids=token_type_ids,
+ **kwargs,
+ )
+
+ # Position_ids in Paligemma are 1-indexed
+ if model_inputs.get("position_ids") is not None:
+ model_inputs["position_ids"] += 1
+ # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
+ # Otherwise we need pixel values to be passed to model. NOTE: use_cache=False needs pixel_values always
+ if cache_position[0] == 0:
+ model_inputs["pixel_values"] = pixel_values
+ is_training = token_type_ids is not None and labels is not None
+ if cache_position[0] == 0 and isinstance(past_key_values, HybridCache):
+ input_tensor = inputs_embeds if inputs_embeds is not None else input_ids
+ causal_mask = self._update_causal_mask(
+ attention_mask, token_type_ids, past_key_values, cache_position, input_tensor, is_training
+ )
+ model_inputs["attention_mask"] = causal_mask
+
+ return model_inputs
+
+
+class PI0FAST(nn.Module):
+ def __init__(self, config: PI0FASTConfig):
+ super().__init__()
+ self.config = config
+
+ # TODO: move tokenizers in Policy
+ fast_tokenizer_path = "physical-intelligence/fast"
+ pi0_paligemma_path = "google/paligemma-3b-pt-224"
+ self.paligemma_tokenizer = AutoTokenizer.from_pretrained(pi0_paligemma_path)
+ self.processor = AutoProcessor.from_pretrained(pi0_paligemma_path)
+ self.fast_tokenizer = AutoProcessor.from_pretrained(fast_tokenizer_path, trust_remote_code=True)
+ self.fast_skip_tokens = self.config.fast_skip_tokens
+ self.max_input_seq_len = self.config.max_input_seq_len
+ self.action_horizon = self.config.chunk_size
+ self.action_dim = self.config.action_feature.shape[0]
+ precision = config.precision
+ torch_precision = PRECISION.get(precision, torch.float32)
+ self.pad_token_id = (
+ self.paligemma_tokenizer.pad_token_id
+ if hasattr(self.paligemma_tokenizer, "pad_token_id")
+ else self.paligemma_tokenizer.eos_token_id
+ )
+
+ paligemma_config = CONFIG_MAPPING["paligemma"](
+ transformers_version="4.48.1",
+ _vocab_size=257152,
+ bos_token_id=2,
+ eos_token_id=1,
+ hidden_size=2048,
+ image_token_index=257152,
+ model_type="paligemma",
+ pad_token_id=0,
+ projection_dim=2048,
+ text_config={
+ "hidden_activation": "gelu_pytorch_tanh",
+ "hidden_size": 2048,
+ "intermediate_size": 16384,
+ "model_type": "gemma",
+ "num_attention_heads": 8,
+ "num_hidden_layers": 18,
+ "num_image_tokens": 256,
+ "num_key_value_heads": 1,
+ "torch_dtype": precision,
+ "vocab_size": 257152,
+ "_attn_implementation": "eager",
+ },
+ vision_config={
+ "hidden_size": 1152,
+ "intermediate_size": 4304,
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "num_image_tokens": 256,
+ "patch_size": 14,
+ "projection_dim": 2048,
+ "projector_hidden_act": "gelu_pytorch_tanh",
+ "torch_dtype": precision,
+ "vision_use_head": False,
+ },
+ )
+ self.pi0_paligemma = PaliGemmaForConditionalGeneration(config=paligemma_config)
+
+ self.pi0_paligemma.prepare_inputs_for_generation = partial(
+ prepare_inputs_for_generation, self=self.pi0_paligemma
+ )
+ # change important stuff in bf16
+ params_to_change_dtype = [
+ "language_model",
+ "vision_tower",
+ "multi_modal",
+ ]
+ for name, param in self.pi0_paligemma.named_parameters():
+ if any(selector in name for selector in params_to_change_dtype):
+ param.data = param.data.to(dtype=torch_precision)
+ self.set_requires_grad()
+ self.image_keys = self.config.image_features.keys()
+ self.ignore_index = self.pi0_paligemma.config.ignore_index
+ self.padding_side = self.config.padding_side
+
+ def set_requires_grad(self):
+ if self.config.freeze_vision_encoder:
+ self.pi0_paligemma.vision_tower.eval()
+ for params in self.pi0_paligemma.vision_tower.parameters():
+ params.requires_grad = False
+ # To avoid unused params issue with distributed training
+ if self.config.freeze_lm_head:
+ for name, params in self.pi0_paligemma.named_parameters():
+ if "embed_tokens" in name: # lm heads and embedding layer are tied
+ params.requires_grad = False
+
+ def embed_tokens(self, tokens: torch.Tensor):
+ return self.pi0_paligemma.language_model.model.embed_tokens(tokens)
+
+ def prepare_inputs_for_generation(self, *args, **kwargs):
+ return self.pi0_paligemma.prepare_inputs_for_generation(*args, **kwargs)
+
+ def prepare_images(self, batch):
+ """Preprocess LeRobot batch into Pi0 inputs"""
+ images = []
+ img_masks = []
+ present_img_keys = [key for key in self.image_keys if key in batch]
+ if len(present_img_keys) == 0:
+ raise ValueError(
+ f"All image features are missing from the batch. At least one expected. (batch: {batch.keys()}) (image_features:{self.config.image_features})"
+ )
+
+ # Preprocess image features present in the batch
+ num_empty_cameras = 0
+ for key in self.image_keys:
+ if key in present_img_keys:
+ img = batch[key]
+
+ if self.config.resize_imgs_with_padding is not None:
+ img = resize_with_pad(
+ img,
+ *self.config.resize_imgs_with_padding,
+ pad_value=0,
+ interpolate_like_pi=self.config.interpolate_like_pi,
+ )
+
+ # Normalize from range [0,1] to [-1,1] as expected by siglip
+ img = img * 2.0 - 1.0
+
+ bsize = img.shape[0]
+ device = img.device
+ mask = torch.ones(bsize, dtype=torch.bool, device=device)
+ else:
+ if num_empty_cameras >= self.config.empty_cameras:
+ continue
+ img = torch.ones_like(img) * -1
+ bsize = img.shape[0]
+ device = img.device
+ mask = torch.ones(bsize, dtype=torch.bool, device=device)
+ num_empty_cameras += 1
+
+ images.append(img)
+ img_masks.append(mask)
+ return images, img_masks
+
+ def normalize_actions(self, actions: torch.Tensor) -> torch.Tensor:
+ mins = actions.amin(dim=(1, 2), keepdim=True)
+ maxs = actions.amax(dim=(1, 2), keepdim=True)
+ return 2 * (actions - mins) / (maxs - mins + 1e-8) - 1
+
+ def _act_tokens_to_paligemma_tokens(self, tokens: torch.Tensor) -> torch.Tensor:
+ out = self.paligemma_tokenizer.vocab_size - 1 - self.fast_skip_tokens - tokens
+ return out
+
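+ # Note (added for clarity): FAST action token ids are remapped into the tail of the
+ # PaliGemma vocabulary as `vocab_size - 1 - fast_skip_tokens - token`, skipping the last
+ # `fast_skip_tokens` special tokens; applying the same formula again recovers the
+ # original FAST id, which is what makes the mapping easy to invert at decoding time.
+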
+ def fast_tokenizer_wrapper(self, actions_norm):
+ """
+ A wrapper for self.fast_tokenizer that ensures batch processing,
+ conversion to PyTorch tensors, and returns a dictionary without padding.
+ """
+ batch_tokens = self.fast_tokenizer(actions_norm)
+ fast_out = self.processor.tokenizer.pad({"input_ids": batch_tokens}, return_tensors="pt")
+
+ return fast_out
+
+ def create_token_type_ids(self, padded_mask: torch.Tensor, prefix_len: int) -> torch.Tensor:
+ token_type_ids = torch.zeros_like(padded_mask, dtype=torch.bool)
+ # Compute cumulative sum mask
+ cumsum_mask = (padded_mask != 0).cumsum(dim=1)
+ # Suffix block (everything after prefix_len)
+ suffix_mask = cumsum_mask > prefix_len
+ token_type_ids = suffix_mask
+ return token_type_ids
+
+ def create_input_tokens(self, state, lang_text, actions=None):
+ bsize = state.shape[0]
+ device = state.device
+ bins = torch.linspace(-1, 1, 256 + 1, device=device)[:-1]
+ discretized = torch.bucketize(state, bins) - 1
+ discretized = discretized[:, :32]
+
+ prefix_texts = []
+ state_text = []
+ for txt, disc in zip(lang_text, discretized, strict=False):
+ cleaned = txt.lower().strip().replace("_", " ")
+ state_str = " ".join(str(val.item()) for val in disc)
+ prefix_texts.append(f"Task: {cleaned}, State: {state_str};\n")
+ state_text.append(f"State: {state_str};\n")
+
+ prefix_out = self.paligemma_tokenizer(
+ prefix_texts, add_special_tokens=True, return_tensors="pt", padding="longest", truncation=False
+ )
+ prefix_ids = prefix_out["input_ids"].to(device)
+ prefix_mask = prefix_out["attention_mask"].to(device)
+ prefix_lens = prefix_mask.sum(dim=1)[:, None].cpu()
+
+ if actions is not None:
+ actions_norm = self.normalize_actions(actions)
+ actions_pad = F.pad(
+ actions_norm, (0, max(0, self.config.max_action_dim - actions_norm.shape[2])), value=0
+ )[:, :, : self.config.max_action_dim]
+ fast_out = self.fast_tokenizer_wrapper(
+ actions_pad.cpu(),
+ )
+ act_ids = fast_out["input_ids"]
+ act_mask = fast_out["attention_mask"].to(device)
+
+ act_ids = self._act_tokens_to_paligemma_tokens(act_ids).to(device)
+            # Positions that were padding (token id 0) before the remapping are set back to the PaliGemma pad token
+ act_ids = torch.where(
+ act_ids == self.paligemma_tokenizer.vocab_size - 1 - self.fast_skip_tokens,
+ self.pad_token_id,
+ act_ids,
+ )
+
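+            # Wrap the action tokens with an "Action: " prefix and the PaliGemma EOS token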
+ eos_token = torch.tensor(
+ [self.paligemma_tokenizer.eos_token_id], dtype=torch.long, device=device
+ ).expand(bsize, -1)
+ eos_mask = torch.tensor([1], dtype=torch.long, device=device).expand(bsize, -1)
+ bos = self.paligemma_tokenizer("Action: ", add_special_tokens=False, return_tensors="pt")
+ bos_token = bos["input_ids"].expand(act_ids.shape[0], -1).to(device)
+ bos_mask = bos["attention_mask"].expand(act_ids.shape[0], -1).to(device)
+ act_ids = torch.cat([bos_token, act_ids, eos_token], dim=1)
+ act_mask = torch.cat([bos_mask, act_mask, eos_mask], dim=1)
+ act_mask = act_mask.to(device)
+ else:
+            act_ids = torch.empty(bsize, 0, dtype=torch.long, device=device)
+ act_mask = torch.empty(bsize, 0, dtype=torch.long, device=device)
+ final_ids = torch.cat([prefix_ids, act_ids], dim=1)
+
+ final_mask = torch.cat([prefix_mask, act_mask], dim=1)
+ batch_inputs = {"input_ids": final_ids.tolist(), "attention_mask": final_mask.tolist()}
+
+ # Use tokenizer pad function
+ padded_output = self.paligemma_tokenizer.pad(
+ batch_inputs, padding="longest", max_length=180, return_tensors="pt"
+ )
+ padded_mask = padded_output["attention_mask"]
+
+        # Mask selecting suffix tokens (everything after the prefix)
+ att_mask = (padded_mask != 0).cumsum(dim=1) > prefix_lens
+
+ token_type_ids = self.create_token_type_ids(padded_mask=padded_mask, prefix_len=prefix_lens)
+
+ padded_output["padded_mask"] = padded_output.pop("attention_mask")
+ padded_output["attention_mask"] = att_mask
+        # The loss is computed neither on the prefix nor on padding tokens
+ padded_output["loss_mask"] = att_mask & padded_output["padded_mask"]
+ padded_output["token_type_ids"] = token_type_ids
+ return padded_output
+
+ def shift_padding_side(
+ self,
+ tokens: torch.Tensor,
+ ar_mask: torch.Tensor,
+ padding_mask: torch.Tensor,
+ loss_mask: torch.Tensor,
+ targets: torch.Tensor,
+ token_type_ids: torch.Tensor,
+ padding_side: str = "right",
+    ) -> tuple[torch.Tensor, ...]:
+ if padding_side not in ["right", "left"]:
+ return tokens, ar_mask, padding_mask, loss_mask, targets, token_type_ids
+
+ new_tokens = torch.empty_like(tokens)
+ new_ar_masks = torch.empty_like(ar_mask)
+ new_padding_mask = torch.empty_like(padding_mask)
+ new_loss_mask = torch.empty_like(loss_mask)
+ new_targets = torch.empty_like(targets)
+ new_token_type_ids = torch.empty_like(token_type_ids)
+ batch_size = tokens.shape[0]
+ for i in range(batch_size):
+ padding_indices = torch.where(padding_mask[i] == 0)[0]
+ non_padding_indices = torch.where(padding_mask[i] == 1)[0]
+ if padding_side == "left":
+ new_indices = torch.cat((padding_indices, non_padding_indices), dim=0)
+ else:
+ new_indices = torch.cat((non_padding_indices, padding_indices), dim=0)
+ new_tokens[i] = tokens[i].index_select(0, new_indices)
+ new_ar_masks[i] = ar_mask[i].index_select(0, new_indices)
+ new_padding_mask[i] = padding_mask[i].index_select(0, new_indices)
+ new_loss_mask[i] = loss_mask[i].index_select(0, new_indices)
+ new_targets[i] = targets[i].index_select(0, new_indices)
+ new_token_type_ids[i] = token_type_ids[i].index_select(0, new_indices)
+
+ return new_tokens, new_ar_masks, new_padding_mask, new_loss_mask, new_targets, new_token_type_ids
+
+ def forward(self, batch: dict[str, Tensor]):
+ device = batch[OBS_STATE].device
+ # TODO: keep like this or move to the policy .forward
+ images, img_masks = self.prepare_images(batch)
+
+ padded_outs = self.create_input_tokens(
+ state=batch[OBS_STATE],
+ lang_text=batch["task"],
+ actions=batch[ACTION],
+ )
+
+ embs, pad_masks, _, targets, loss_mask, token_type_ids = self.embed_inputs(
+ images,
+ img_masks,
+ padded_outs["input_ids"],
+ padded_outs["padded_mask"],
+ padded_outs["attention_mask"],
+ padded_outs["loss_mask"],
+ padded_outs["token_type_ids"],
+ padding_side=self.padding_side,
+ )
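+        # Position ids count only non-pad tokens (0-indexed cumulative sum of the padding mask)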
+ position_ids = torch.cumsum(pad_masks, dim=1) - 1
+ token_type_ids = token_type_ids.to(dtype=torch.int64)
+ past_seen_tokens = 0
+ cache_position = torch.arange(past_seen_tokens, past_seen_tokens + embs.shape[1], device=embs.device)
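+        # Build a block-causal attention mask from the padding mask and token types (prefix vs. action tokens)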
+ pad_masks = block_causal_update_causal_mask(
+ attention_mask=pad_masks,
+ past_key_values=None,
+ cache_position=cache_position,
+ input_tensor=embs,
+ token_type_ids=token_type_ids,
+ dtype=self.pi0_paligemma.dtype,
+ attn_implementation=self.pi0_paligemma.config.text_config._attn_implementation,
+ )
+ outputs = self.pi0_paligemma.forward(
+ input_ids=None,
+ token_type_ids=None,
+ attention_mask=pad_masks,
+ position_ids=position_ids,
+ past_key_values=None,
+ inputs_embeds=embs,
+ use_cache=False,
+ labels=None,
+ )
+
+ logits = outputs.logits
+
+ loss_fct = nn.CrossEntropyLoss(reduction="none")
+
+ # Shift left for next-step prediction
+ logits = logits[:, :-1, :]
+ targets = targets[:, 1:].to(device) # Shift targets
+ loss_mask = loss_mask[:, 1:].to(device) # Ensure correct shape
+
+ # Compute per-token loss
+ token_loss = loss_fct(logits.reshape(-1, logits.shape[-1]), targets.reshape(-1))
+
+ # Apply loss mask
+ token_loss = token_loss * loss_mask.reshape(-1)
+
+ # Compute final loss
+ loss = token_loss.sum() / torch.clamp(loss_mask.sum(), min=1)
+
+ # Return loss dictionary
+ loss_dict = {"ce_loss": loss.item(), "loss": loss}
+ return loss_dict
+
+ def decode_actions_with_fast(
+ self,
+ tokens: list[list[int]],
+ *,
+ time_horizon: int | None = None,
+ action_dim: int | None = None,
+ relaxed_decoding: bool = True,
+    ) -> np.ndarray:
+ """
+ Adapt original decoding in FAST to always return actions instead of zeros.
+ """
+ self.time_horizon = (
+ time_horizon or self.fast_tokenizer.time_horizon or self.fast_tokenizer.called_time_horizon
+ )
+ self.action_dim = (
+ action_dim or self.fast_tokenizer.action_dim or self.fast_tokenizer.called_action_dim
+ )
+
+ # Cache the time horizon and action dimension for the next call
+ self.called_time_horizon = self.time_horizon
+ self.called_action_dim = self.action_dim
+
+ assert self.time_horizon is not None and self.action_dim is not None, (
+ "Tokenizer not initialized, call encode() once or pass in time_horizon and action_dim."
+ )
+
+ decoded_actions = []
+ for token in tokens:
+ try:
+ decoded_tokens = self.fast_tokenizer.bpe_tokenizer.decode(token)
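+                # Each decoded character encodes one quantized DCT coefficient (offset by min_token)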
+ decoded_dct_coeff = np.array(list(map(ord, decoded_tokens))) + self.fast_tokenizer.min_token
+ if relaxed_decoding:
+ # Expected sequence length
+ expected_seq_len = self.time_horizon * self.action_dim
+ diff = expected_seq_len - decoded_dct_coeff.shape[0]
+ # Apply truncation if too long
+ if diff < 0:
+ decoded_dct_coeff = decoded_dct_coeff[:expected_seq_len] # Truncate on the right
+ # Apply padding if too short
+ elif diff > 0:
+ decoded_dct_coeff = np.pad(
+ decoded_dct_coeff, (0, diff), mode="constant", constant_values=0
+ )
+
+ decoded_dct_coeff = decoded_dct_coeff.reshape(-1, self.action_dim)
+ assert decoded_dct_coeff.shape == (
+ self.time_horizon,
+ self.action_dim,
+ ), (
+ f"Decoded DCT coefficients have shape {decoded_dct_coeff.shape}, expected ({self.time_horizon}, {self.action_dim})"
+ )
+ except Exception as e:
+ print(f"Error decoding tokens: {e}")
+ print(f"Tokens: {token}")
+ decoded_dct_coeff = np.zeros((self.time_horizon, self.action_dim))
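+            # Undo the DCT: rescale the coefficients and apply the inverse DCT along the time axis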
+ decoded_actions.append(idct(decoded_dct_coeff / self.fast_tokenizer.scale, axis=0, norm="ortho"))
+ return np.stack(decoded_actions)
+
+ def extract_actions(self, tokens: torch.Tensor, action_horizon: int, action_dim: int) -> torch.Tensor:
+ """
+ Extracts actions from predicted output tokens using the FAST model.
+
+ Args:
+ tokens (torch.Tensor): The input tensor of tokenized outputs.
+ action_horizon (int): The number of timesteps for actions.
+ action_dim (int): The dimensionality of each action.
+
+ Returns:
+ torch.Tensor: The extracted actions as a tensor of shape (action_horizon, action_dim).
+ """
+ # Decode predicted output tokens
+ decoded_tokens = self.paligemma_tokenizer.batch_decode(tokens, skip_special_tokens=True)
+ cleaned_tokens = [
+ tokens_sequence.replace("Action:", "").replace(":", "").strip().split("|")[0].strip()
+ for tokens_sequence in decoded_tokens
+ ]
+ raw_action_tokens = [
+ self.processor.tokenizer.encode(sample_tokens, return_tensors="pt", padding=False)
+ for sample_tokens in cleaned_tokens
+        ]
+ action_tokens = [
+ self._act_tokens_to_paligemma_tokens(raw_action_token) for raw_action_token in raw_action_tokens
+ ]
+ # returns the tensor of decoded actions per sample in a list
+ decoded_actions = [
+ torch.tensor(
+ self.decode_actions_with_fast(
+ tok.tolist(),
+ time_horizon=action_horizon,
+ action_dim=action_dim,
+ relaxed_decoding=self.config.relaxed_action_decoding,
+ ),
+ device=tokens.device,
+ ).squeeze(0)
+ for tok in action_tokens
+ ]
+
+ return torch.stack(
+ decoded_actions,
+ dim=0,
+ )
+
+ def generate_actions(self, batch: dict[str, Tensor]):
+ # TODO: keep like this or move to the policy .forward
+ images, img_masks = self.prepare_images(batch)
+
+ padded_outs = self.create_input_tokens(state=batch[OBS_STATE], lang_text=batch["task"], actions=None)
+ embs, pad_masks, att_masks2, targets, loss_mask, token_type_ids = self.embed_inputs(
+ images,
+ img_masks,
+ padded_outs["input_ids"],
+ padded_outs["padded_mask"],
+ padded_outs["attention_mask"],
+ padded_outs["loss_mask"],
+ padded_outs["token_type_ids"],
+ padding_side="left",
+ )
+ token_type_ids = token_type_ids.to(dtype=torch.int64)
+ prefix_position_ids = torch.cumsum(pad_masks, dim=1) - 1
+ output_tokens = self.pi0_paligemma.generate(
+ input_ids=None,
+ attention_mask=pad_masks,
+ position_ids=prefix_position_ids,
+ past_key_values=None,
+ inputs_embeds=embs,
+ use_cache=self.config.use_cache,
+ max_new_tokens=self.config.max_decoding_steps,
+ do_sample=False,
+ num_beams=1,
+ token_type_ids=token_type_ids,
+ )
+ actions = self.extract_actions(output_tokens, self.action_horizon, self.action_dim)
+ return actions
+
+ def embed_image(self, image: torch.Tensor):
+ # Handle different transformers versions
+ if hasattr(self.pi0_paligemma, "get_image_features"):
+ return self.pi0_paligemma.get_image_features(image)
+ else:
+ return self.pi0_paligemma.model.get_image_features(image)
+
+ def embed_inputs(
+ self,
+ images,
+ img_masks,
+ tokens,
+ pad_mask,
+ ar_mask,
+ loss_mask,
+ token_type_ids,
+ padding_side: str = "right",
+ ):
+        # TODO: avoid Python lists and torch.cat; prefer pre-allocation with torch.empty
+        # `images` is a list of tensors that all share the same shape, so everything below is vectorized
+ device = images[0].device
+ image_embedding_dim = images[0].shape[-1] # TODO should be from self.config
+ all_images = torch.stack(images, dim=1).to(device)
+ b, n, c, h, w = all_images.shape
+ all_images = all_images.view(b * n, c, h, w)
+ embedded = self.embed_image(all_images).to(device)
+ b_n, p, image_embedding_dim = embedded.shape # Extract current dimensions
+ m = b_n // b # Compute the number of images per sample dynamically
+
+ # Reshape dynamically
+ embedded = embedded.view(b, m, p, image_embedding_dim)
+ tokens_embs = self.embed_tokens(tokens.to(device))
+
+ img_masks = torch.stack(img_masks, dim=1).unsqueeze(-1).to(device)
+ num_img_emb = embedded.shape[2]
+ img_pad_masks = img_masks.repeat(1, 1, num_img_emb).view(b, -1)
+ img_att_masks = torch.zeros((b, n, num_img_emb), dtype=torch.long, device=device).reshape(b, -1)
+
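+        # Image tokens belong to the prefix block (type 0) and are never supervised:
+        # their targets are the pad token (mapped to ignore_index below) and their loss mask is 0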
+ image_target_tokens = (
+ torch.ones((b, n, num_img_emb), dtype=torch.long, device=device) * self.pad_token_id
+ ).reshape(b, -1)
+ image_loss_mask = torch.zeros((b, n, num_img_emb), dtype=torch.long, device=device).reshape(b, -1)
+
+ embedded = embedded.reshape(b, n * num_img_emb, image_embedding_dim) # Shape: (B, N*P, D)
+
+ embs = torch.cat([embedded, tokens_embs], dim=1).to(device)
+ pad_masks = torch.cat([img_pad_masks, pad_mask.to(device)], dim=1)
+ att_masks = torch.cat([img_att_masks, ar_mask.to(device)], dim=1)
+ loss_masks = torch.cat([image_loss_mask, loss_mask.to(device)], dim=1)
+ targets = torch.cat([image_target_tokens, tokens.to(device)], dim=1)
+ token_type_ids = torch.cat([img_att_masks, token_type_ids.to(device)], dim=1)
+
+ # Shift pad tokens to the left (.generate()) or right (.train())
+ embs, att_masks, pad_masks, loss_masks, targets, token_type_ids = self.shift_padding_side(
+ embs, att_masks, pad_masks, loss_masks, targets, token_type_ids, padding_side=padding_side
+ )
+
+ targets = torch.where(targets == self.pad_token_id, self.ignore_index, targets)
+ return embs, pad_masks, att_masks, targets, loss_masks, token_type_ids
+
+
+def resize_with_pad(img, width, height, pad_value=0, interpolate_like_pi=True):
+    # Resize so the image fits within (width, height) while preserving aspect ratio, then pad to the target size;
+    # a no-op when the image already matches the target size
+    if img.ndim != 4:
+        raise ValueError(f"Expected a (b, c, h, w) tensor, got shape {img.shape}")
+
+ cur_height, cur_width = img.shape[2:]
+
+ ratio = max(cur_width / width, cur_height / height)
+ resized_height = int(cur_height / ratio)
+ resized_width = int(cur_width / ratio)
+
+ if interpolate_like_pi:
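+        # Mimic the PI implementation: round-trip through uint8 and PIL bilinear resampling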
+ img = (img * 255.0).to(dtype=torch.uint8)
+ img = img.permute(0, 2, 3, 1)
+ original_device = img.device
+ img = img.to(device="cpu").numpy()
+ imgs = []
+ for sub_img in img:
+ sub_img = Image.fromarray(sub_img)
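+            # resample=2 is PIL.Image.BILINEAR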
+ resized_img = sub_img.resize((resized_width, resized_height), resample=2)
+ resized_img = torch.from_numpy(np.array(resized_img))
+ imgs.append(resized_img)
+ img = torch.stack(imgs, dim=0)
+ img = img.permute(0, 3, 1, 2)
+ resized_img = img.to(device=original_device, dtype=torch.float32) / 255.0
+ else:
+ resized_img = F.interpolate(
+ img, size=(resized_height, resized_width), mode="bilinear", align_corners=False
+ )
+
+ pad_height = max(0, int(height - resized_height))
+ pad_width = max(0, int(width - resized_width))
+
+ # pad on left and top of image
+ padded_img = F.pad(resized_img, (pad_width, 0, pad_height, 0), value=pad_value)
+ return padded_img
diff --git a/src/lerobot/policies/pretrained.py b/src/lerobot/policies/pretrained.py
new file mode 100644
index 0000000000..d745c901c4
--- /dev/null
+++ b/src/lerobot/policies/pretrained.py
@@ -0,0 +1,243 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+import builtins
+import logging
+import os
+from importlib.resources import files
+from pathlib import Path
+from tempfile import TemporaryDirectory
+from typing import TypeVar
+
+import packaging.version
+import safetensors
+from huggingface_hub import HfApi, ModelCard, ModelCardData, hf_hub_download
+from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE
+from huggingface_hub.errors import HfHubHTTPError
+from safetensors.torch import load_model as load_model_as_safetensor, save_model as save_model_as_safetensor
+from torch import Tensor, nn
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.train import TrainPipelineConfig
+from lerobot.utils.hub import HubMixin
+
+T = TypeVar("T", bound="PreTrainedPolicy")
+
+
+class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
+ """
+ Base class for policy models.
+ """
+
+ config_class: None
+ name: None
+
+ def __init__(self, config: PreTrainedConfig, *inputs, **kwargs):
+ super().__init__()
+ if not isinstance(config, PreTrainedConfig):
+ raise ValueError(
+ f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
+ "`PreTrainedConfig`. To create a model from a pretrained model use "
+ f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.config = config
+
+ def __init_subclass__(cls, **kwargs):
+ super().__init_subclass__(**kwargs)
+ if not getattr(cls, "config_class", None):
+ raise TypeError(f"Class {cls.__name__} must define 'config_class'")
+ if not getattr(cls, "name", None):
+ raise TypeError(f"Class {cls.__name__} must define 'name'")
+
+ def _save_pretrained(self, save_directory: Path) -> None:
+ self.config._save_pretrained(save_directory)
+ model_to_save = self.module if hasattr(self, "module") else self
+ save_model_as_safetensor(model_to_save, str(save_directory / SAFETENSORS_SINGLE_FILE))
+
+ @classmethod
+ def from_pretrained(
+ cls: builtins.type[T],
+ pretrained_name_or_path: str | Path,
+ *,
+ config: PreTrainedConfig | None = None,
+ force_download: bool = False,
+ resume_download: bool | None = None,
+ proxies: dict | None = None,
+ token: str | bool | None = None,
+ cache_dir: str | Path | None = None,
+ local_files_only: bool = False,
+ revision: str | None = None,
+ strict: bool = False,
+ **kwargs,
+ ) -> T:
+ """
+ The policy is set in evaluation mode by default using `policy.eval()` (dropout modules are
+ deactivated). To train it, you should first set it back in training mode with `policy.train()`.
+ """
+ if config is None:
+ config = PreTrainedConfig.from_pretrained(
+ pretrained_name_or_path=pretrained_name_or_path,
+ force_download=force_download,
+ resume_download=resume_download,
+ proxies=proxies,
+ token=token,
+ cache_dir=cache_dir,
+ local_files_only=local_files_only,
+ revision=revision,
+ **kwargs,
+ )
+ model_id = str(pretrained_name_or_path)
+ instance = cls(config, **kwargs)
+ if os.path.isdir(model_id):
+ print("Loading weights from local directory")
+ model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE)
+ policy = cls._load_as_safetensor(instance, model_file, config.device, strict)
+ else:
+ try:
+ model_file = hf_hub_download(
+ repo_id=model_id,
+ filename=SAFETENSORS_SINGLE_FILE,
+ revision=revision,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ proxies=proxies,
+ resume_download=resume_download,
+ token=token,
+ local_files_only=local_files_only,
+ )
+ policy = cls._load_as_safetensor(instance, model_file, config.device, strict)
+ except HfHubHTTPError as e:
+ raise FileNotFoundError(
+ f"{SAFETENSORS_SINGLE_FILE} not found on the HuggingFace Hub in {model_id}"
+ ) from e
+
+ policy.to(config.device)
+ policy.eval()
+ return policy
+
+ @classmethod
+ def _load_as_safetensor(cls, model: T, model_file: str, map_location: str, strict: bool) -> T:
+ if packaging.version.parse(safetensors.__version__) < packaging.version.parse("0.4.3"):
+ load_model_as_safetensor(model, model_file, strict=strict)
+ if map_location != "cpu":
+ logging.warning(
+                    "Loading model weights on devices other than 'cpu' is not supported natively in your version of safetensors."
+ " This means that the model is loaded on 'cpu' first and then copied to the device."
+ " This leads to a slower loading time."
+ " Please update safetensors to version 0.4.3 or above for improved performance."
+ )
+ model.to(map_location)
+ else:
+ safetensors.torch.load_model(model, model_file, strict=strict, device=map_location)
+ return model
+
+ @abc.abstractmethod
+ def get_optim_params(self) -> dict:
+ """
+ Returns the policy-specific parameters dict to be passed on to the optimizer.
+ """
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def reset(self):
+ """To be called whenever the environment is reset.
+
+ Does things like clearing caches.
+ """
+ raise NotImplementedError
+
+ # TODO(aliberts, rcadene): split into 'forward' and 'compute_loss'?
+ @abc.abstractmethod
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict | None]:
+        """Run the batch through the model and compute the loss.
+
+        Args:
+            batch (dict[str, Tensor]): A batch of observations, actions and any other required inputs.
+
+ Returns:
+ tuple[Tensor, dict | None]: The loss and potentially other information. Apart from the loss which
+ is a Tensor, all other items should be logging-friendly, native Python types.
+ """
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ """Returns the action chunk (for action chunking policies) for a given observation, potentially in batch mode.
+
+ Child classes using action chunking should use this method within `select_action` to form the action chunk
+ cached for selection.
+ """
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
+ """Return one action to run in the environment (potentially in batch mode).
+
+ When the model uses a history of observations, or outputs a sequence of actions, this method deals
+ with caching.
+ """
+ raise NotImplementedError
+
+ def push_model_to_hub(
+ self,
+ cfg: TrainPipelineConfig,
+ ):
+ api = HfApi()
+ repo_id = api.create_repo(
+ repo_id=self.config.repo_id, private=self.config.private, exist_ok=True
+ ).repo_id
+
+ # Push the files to the repo in a single commit
+ with TemporaryDirectory(ignore_cleanup_errors=True) as tmp:
+ saved_path = Path(tmp) / repo_id
+
+ self.save_pretrained(saved_path) # Calls _save_pretrained and stores model tensors
+
+ card = self.generate_model_card(
+ cfg.dataset.repo_id, self.config.type, self.config.license, self.config.tags
+ )
+ card.save(str(saved_path / "README.md"))
+
+ cfg.save_pretrained(saved_path) # Calls _save_pretrained and stores train config
+
+ commit_info = api.upload_folder(
+ repo_id=repo_id,
+ repo_type="model",
+ folder_path=saved_path,
+ commit_message="Upload policy weights, train config and readme",
+ allow_patterns=["*.safetensors", "*.json", "*.yaml", "*.md"],
+ ignore_patterns=["*.tmp", "*.log"],
+ )
+
+ logging.info(f"Model pushed to {commit_info.repo_url.url}")
+
+ def generate_model_card(
+ self, dataset_repo_id: str, model_type: str, license: str | None, tags: list[str] | None
+ ) -> ModelCard:
+ base_model = "lerobot/smolvla_base" if model_type == "smolvla" else None # Set a base model
+
+ card_data = ModelCardData(
+ license=license or "apache-2.0",
+ library_name="lerobot",
+ pipeline_tag="robotics",
+ tags=list(set(tags or []).union({"robotics", "lerobot", model_type})),
+ model_name=model_type,
+ datasets=dataset_repo_id,
+ base_model=base_model,
+ )
+
+ template_card = files("lerobot.templates").joinpath("lerobot_modelcard_template.md").read_text()
+ card = ModelCard.from_template(card_data, template_str=template_card)
+ card.validate()
+ return card
diff --git a/src/lerobot/policies/sac/configuration_sac.py b/src/lerobot/policies/sac/configuration_sac.py
new file mode 100644
index 0000000000..c57eeeb728
--- /dev/null
+++ b/src/lerobot/policies/sac/configuration_sac.py
@@ -0,0 +1,245 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.constants import ACTION, OBS_IMAGE, OBS_STATE
+from lerobot.optim.optimizers import MultiAdamConfig
+
+
+def is_image_feature(key: str) -> bool:
+ """Check if a feature key represents an image feature.
+
+ Args:
+ key: The feature key to check
+
+ Returns:
+ True if the key represents an image feature, False otherwise
+ """
+ return key.startswith(OBS_IMAGE)
+
+
+@dataclass
+class ConcurrencyConfig:
+ """Configuration for the concurrency of the actor and learner.
+ Possible values are:
+ - "threads": Use threads for the actor and learner.
+ - "processes": Use processes for the actor and learner.
+ """
+
+ actor: str = "threads"
+ learner: str = "threads"
+
+
+@dataclass
+class ActorLearnerConfig:
+ learner_host: str = "127.0.0.1"
+ learner_port: int = 50051
+ policy_parameters_push_frequency: int = 4
+ queue_get_timeout: float = 2
+
+
+@dataclass
+class CriticNetworkConfig:
+ hidden_dims: list[int] = field(default_factory=lambda: [256, 256])
+ activate_final: bool = True
+ final_activation: str | None = None
+
+
+@dataclass
+class ActorNetworkConfig:
+ hidden_dims: list[int] = field(default_factory=lambda: [256, 256])
+ activate_final: bool = True
+
+
+@dataclass
+class PolicyConfig:
+ use_tanh_squash: bool = True
+ std_min: float = 1e-5
+ std_max: float = 10.0
+ init_final: float = 0.05
+
+
+@PreTrainedConfig.register_subclass("sac")
+@dataclass
+class SACConfig(PreTrainedConfig):
+ """Soft Actor-Critic (SAC) configuration.
+
+ SAC is an off-policy actor-critic deep RL algorithm based on the maximum entropy
+ reinforcement learning framework. It learns a policy and a Q-function simultaneously
+ using experience collected from the environment.
+
+ This configuration class contains all the parameters needed to define a SAC agent,
+ including network architectures, optimization settings, and algorithm-specific
+ hyperparameters.
+ """
+
+ # Mapping of feature types to normalization modes
+ normalization_mapping: dict[str, NormalizationMode] = field(
+ default_factory=lambda: {
+ "VISUAL": NormalizationMode.MEAN_STD,
+ "STATE": NormalizationMode.MIN_MAX,
+ "ENV": NormalizationMode.MIN_MAX,
+ "ACTION": NormalizationMode.MIN_MAX,
+ }
+ )
+
+ # Statistics for normalizing different types of inputs
+ dataset_stats: dict[str, dict[str, list[float]]] | None = field(
+ default_factory=lambda: {
+ OBS_IMAGE: {
+ "mean": [0.485, 0.456, 0.406],
+ "std": [0.229, 0.224, 0.225],
+ },
+ OBS_STATE: {
+ "min": [0.0, 0.0],
+ "max": [1.0, 1.0],
+ },
+ ACTION: {
+ "min": [0.0, 0.0, 0.0],
+ "max": [1.0, 1.0, 1.0],
+ },
+ }
+ )
+
+ # Architecture specifics
+ # Device to run the model on (e.g., "cuda", "cpu")
+ device: str = "cpu"
+ # Device to store the model on
+ storage_device: str = "cpu"
+    # Name of the vision encoder model (set to "helper2424/resnet10" for the HIL-SERL ResNet-10)
+ vision_encoder_name: str | None = None
+ # Whether to freeze the vision encoder during training
+ freeze_vision_encoder: bool = True
+ # Hidden dimension size for the image encoder
+ image_encoder_hidden_dim: int = 32
+ # Whether to use a shared encoder for actor and critic
+ shared_encoder: bool = True
+    # Number of discrete actions, e.g. for gripper actions
+ num_discrete_actions: int | None = None
+ # Dimension of the image embedding pooling
+ image_embedding_pooling_dim: int = 8
+
+    # Training parameters
+ # Number of steps for online training
+ online_steps: int = 1000000
+ # Seed for the online environment
+ online_env_seed: int = 10000
+ # Capacity of the online replay buffer
+ online_buffer_capacity: int = 100000
+ # Capacity of the offline replay buffer
+ offline_buffer_capacity: int = 100000
+ # Whether to use asynchronous prefetching for the buffers
+ async_prefetch: bool = False
+ # Number of steps before learning starts
+ online_step_before_learning: int = 100
+ # Frequency of policy updates
+ policy_update_freq: int = 1
+
+ # SAC algorithm parameters
+ # Discount factor for the SAC algorithm
+ discount: float = 0.99
+ # Initial temperature value
+ temperature_init: float = 1.0
+ # Number of critics in the ensemble
+ num_critics: int = 2
+ # Number of subsampled critics for training
+ num_subsample_critics: int | None = None
+ # Learning rate for the critic network
+ critic_lr: float = 3e-4
+ # Learning rate for the actor network
+ actor_lr: float = 3e-4
+ # Learning rate for the temperature parameter
+ temperature_lr: float = 3e-4
+ # Weight for the critic target update
+ critic_target_update_weight: float = 0.005
+    # Update-to-data (UTD) ratio (set it to a value > 1 to enable multiple updates per data sample)
+ utd_ratio: int = 1
+ # Hidden dimension size for the state encoder
+ state_encoder_hidden_dim: int = 256
+ # Dimension of the latent space
+ latent_dim: int = 256
+    # Target entropy for the SAC algorithm (if None, it defaults to -action_dim / 2)
+ target_entropy: float | None = None
+ # Whether to use backup entropy for the SAC algorithm
+ use_backup_entropy: bool = True
+ # Gradient clipping norm for the SAC algorithm
+ grad_clip_norm: float = 40.0
+
+ # Network configuration
+ # Configuration for the critic network architecture
+ critic_network_kwargs: CriticNetworkConfig = field(default_factory=CriticNetworkConfig)
+ # Configuration for the actor network architecture
+ actor_network_kwargs: ActorNetworkConfig = field(default_factory=ActorNetworkConfig)
+ # Configuration for the policy parameters
+ policy_kwargs: PolicyConfig = field(default_factory=PolicyConfig)
+ # Configuration for the discrete critic network
+ discrete_critic_network_kwargs: CriticNetworkConfig = field(default_factory=CriticNetworkConfig)
+ # Configuration for actor-learner architecture
+ actor_learner_config: ActorLearnerConfig = field(default_factory=ActorLearnerConfig)
+ # Configuration for concurrency settings (you can use threads or processes for the actor and learner)
+ concurrency: ConcurrencyConfig = field(default_factory=ConcurrencyConfig)
+
+ # Optimizations
+ use_torch_compile: bool = True
+
+ def __post_init__(self):
+ super().__post_init__()
+ # Any validation specific to SAC configuration
+
+ def get_optimizer_preset(self) -> MultiAdamConfig:
+ return MultiAdamConfig(
+ weight_decay=0.0,
+ optimizer_groups={
+ "actor": {"lr": self.actor_lr},
+ "critic": {"lr": self.critic_lr},
+ "temperature": {"lr": self.temperature_lr},
+ },
+ )
+
+ def get_scheduler_preset(self) -> None:
+ return None
+
+ def validate_features(self) -> None:
+ has_image = any(is_image_feature(key) for key in self.input_features)
+ has_state = OBS_STATE in self.input_features
+
+ if not (has_state or has_image):
+ raise ValueError(
+ "You must provide either 'observation.state' or an image observation (key starting with 'observation.image') in the input features"
+ )
+
+ if "action" not in self.output_features:
+ raise ValueError("You must provide 'action' in the output features")
+
+ @property
+ def image_features(self) -> list[str]:
+ return [key for key in self.input_features if is_image_feature(key)]
+
+ @property
+ def observation_delta_indices(self) -> list:
+ return None
+
+ @property
+ def action_delta_indices(self) -> list:
+ return None # SAC typically predicts one action at a time
+
+ @property
+ def reward_delta_indices(self) -> None:
+ return None
diff --git a/src/lerobot/policies/sac/modeling_sac.py b/src/lerobot/policies/sac/modeling_sac.py
new file mode 100644
index 0000000000..878f3cdd85
--- /dev/null
+++ b/src/lerobot/policies/sac/modeling_sac.py
@@ -0,0 +1,1117 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from collections.abc import Callable
+from dataclasses import asdict
+from typing import Literal
+
+import einops
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F # noqa: N812
+from torch import Tensor
+from torch.distributions import MultivariateNormal, TanhTransform, Transform, TransformedDistribution
+
+from lerobot.policies.normalize import NormalizeBuffer
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.sac.configuration_sac import SACConfig, is_image_feature
+from lerobot.policies.utils import get_device_from_parameters
+
+DISCRETE_DIMENSION_INDEX = -1 # Gripper is always the last dimension
+
+
+class SACPolicy(
+ PreTrainedPolicy,
+):
+ config_class = SACConfig
+ name = "sac"
+
+ def __init__(
+ self,
+ config: SACConfig | None = None,
+ dataset_stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ super().__init__(config)
+ config.validate_features()
+ self.config = config
+
+ # Determine action dimension and initialize all components
+ continuous_action_dim = config.output_features["action"].shape[0]
+ self._init_normalization(dataset_stats)
+ self._init_encoders()
+ self._init_critics(continuous_action_dim)
+ self._init_actor(continuous_action_dim)
+ self._init_temperature()
+
+ def get_optim_params(self) -> dict:
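+        # NOTE: when the encoder is shared, its parameters are excluded from the actor group
+        # and are updated only through the critic loss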
+ optim_params = {
+ "actor": [
+ p
+ for n, p in self.actor.named_parameters()
+ if not n.startswith("encoder") or not self.shared_encoder
+ ],
+ "critic": self.critic_ensemble.parameters(),
+ "temperature": self.log_alpha,
+ }
+ if self.config.num_discrete_actions is not None:
+ optim_params["discrete_critic"] = self.discrete_critic.parameters()
+ return optim_params
+
+ def reset(self):
+ """Reset the policy"""
+ pass
+
+ @torch.no_grad()
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ """Predict a chunk of actions given environment observations."""
+ raise NotImplementedError("SACPolicy does not support action chunking. It returns single actions!")
+
+ @torch.no_grad()
+ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
+ """Select action for inference/evaluation"""
+
+ observations_features = None
+ if self.shared_encoder and self.actor.encoder.has_images:
+ # Cache and normalize image features
+ observations_features = self.actor.encoder.get_cached_image_features(batch, normalize=True)
+
+ actions, _, _ = self.actor(batch, observations_features)
+
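+        # Append the discrete (gripper) action chosen greedily from the discrete critic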
+ if self.config.num_discrete_actions is not None:
+ discrete_action_value = self.discrete_critic(batch, observations_features)
+ discrete_action = torch.argmax(discrete_action_value, dim=-1, keepdim=True)
+ actions = torch.cat([actions, discrete_action], dim=-1)
+
+ return actions
+
+ def critic_forward(
+ self,
+ observations: dict[str, Tensor],
+ actions: Tensor,
+ use_target: bool = False,
+ observation_features: Tensor | None = None,
+ ) -> Tensor:
+ """Forward pass through a critic network ensemble
+
+ Args:
+ observations: Dictionary of observations
+ actions: Action tensor
+ use_target: If True, use target critics, otherwise use ensemble critics
+
+ Returns:
+ Tensor of Q-values from all critics
+ """
+
+ critics = self.critic_target if use_target else self.critic_ensemble
+ q_values = critics(observations, actions, observation_features)
+ return q_values
+
+ def discrete_critic_forward(
+ self, observations, use_target=False, observation_features=None
+ ) -> torch.Tensor:
+ """Forward pass through a discrete critic network
+
+ Args:
+ observations: Dictionary of observations
+ use_target: If True, use target critics, otherwise use ensemble critics
+ observation_features: Optional pre-computed observation features to avoid recomputing encoder output
+
+ Returns:
+ Tensor of Q-values from the discrete critic network
+ """
+ discrete_critic = self.discrete_critic_target if use_target else self.discrete_critic
+ q_values = discrete_critic(observations, observation_features)
+ return q_values
+
+ def forward(
+ self,
+ batch: dict[str, Tensor | dict[str, Tensor]],
+ model: Literal["actor", "critic", "temperature", "discrete_critic"] = "critic",
+ ) -> dict[str, Tensor]:
+ """Compute the loss for the given model
+
+ Args:
+ batch: Dictionary containing:
+ - action: Action tensor
+ - reward: Reward tensor
+ - state: Observations tensor dict
+ - next_state: Next observations tensor dict
+ - done: Done mask tensor
+ - observation_feature: Optional pre-computed observation features
+ - next_observation_feature: Optional pre-computed next observation features
+ model: Which model to compute the loss for ("actor", "critic", "discrete_critic", or "temperature")
+
+ Returns:
+ The computed loss tensor
+ """
+ # Extract common components from batch
+ actions: Tensor = batch["action"]
+ observations: dict[str, Tensor] = batch["state"]
+ observation_features: Tensor = batch.get("observation_feature")
+
+ if model == "critic":
+ # Extract critic-specific components
+ rewards: Tensor = batch["reward"]
+ next_observations: dict[str, Tensor] = batch["next_state"]
+ done: Tensor = batch["done"]
+ next_observation_features: Tensor = batch.get("next_observation_feature")
+
+ loss_critic = self.compute_loss_critic(
+ observations=observations,
+ actions=actions,
+ rewards=rewards,
+ next_observations=next_observations,
+ done=done,
+ observation_features=observation_features,
+ next_observation_features=next_observation_features,
+ )
+
+ return {"loss_critic": loss_critic}
+
+ if model == "discrete_critic" and self.config.num_discrete_actions is not None:
+ # Extract critic-specific components
+ rewards: Tensor = batch["reward"]
+ next_observations: dict[str, Tensor] = batch["next_state"]
+ done: Tensor = batch["done"]
+ next_observation_features: Tensor = batch.get("next_observation_feature")
+ complementary_info = batch.get("complementary_info")
+ loss_discrete_critic = self.compute_loss_discrete_critic(
+ observations=observations,
+ actions=actions,
+ rewards=rewards,
+ next_observations=next_observations,
+ done=done,
+ observation_features=observation_features,
+ next_observation_features=next_observation_features,
+ complementary_info=complementary_info,
+ )
+ return {"loss_discrete_critic": loss_discrete_critic}
+ if model == "actor":
+ return {
+ "loss_actor": self.compute_loss_actor(
+ observations=observations,
+ observation_features=observation_features,
+ )
+ }
+
+ if model == "temperature":
+ return {
+ "loss_temperature": self.compute_loss_temperature(
+ observations=observations,
+ observation_features=observation_features,
+ )
+ }
+
+ raise ValueError(f"Unknown model type: {model}")
+
+ def update_target_networks(self):
+ """Update target networks with exponential moving average"""
+ for target_param, param in zip(
+ self.critic_target.parameters(),
+ self.critic_ensemble.parameters(),
+ strict=True,
+ ):
+ target_param.data.copy_(
+ param.data * self.config.critic_target_update_weight
+ + target_param.data * (1.0 - self.config.critic_target_update_weight)
+ )
+ if self.config.num_discrete_actions is not None:
+ for target_param, param in zip(
+ self.discrete_critic_target.parameters(),
+ self.discrete_critic.parameters(),
+ strict=True,
+ ):
+ target_param.data.copy_(
+ param.data * self.config.critic_target_update_weight
+ + target_param.data * (1.0 - self.config.critic_target_update_weight)
+ )
+
+ def update_temperature(self):
+ self.temperature = self.log_alpha.exp().item()
+
+ def compute_loss_critic(
+ self,
+ observations,
+ actions,
+ rewards,
+ next_observations,
+ done,
+ observation_features: Tensor | None = None,
+ next_observation_features: Tensor | None = None,
+ ) -> Tensor:
+ with torch.no_grad():
+ next_action_preds, next_log_probs, _ = self.actor(next_observations, next_observation_features)
+
+ # 2- compute q targets
+ q_targets = self.critic_forward(
+ observations=next_observations,
+ actions=next_action_preds,
+ use_target=True,
+ observation_features=next_observation_features,
+ )
+
+            # Subsample critics to prevent overfitting when using a high UTD (update-to-data) ratio
+ # TODO: Get indices before forward pass to avoid unnecessary computation
+ if self.config.num_subsample_critics is not None:
+ indices = torch.randperm(self.config.num_critics)
+ indices = indices[: self.config.num_subsample_critics]
+ q_targets = q_targets[indices]
+
+            # Take the minimum Q-value across the (subsampled) critics
+            min_q, _ = q_targets.min(dim=0)
+ if self.config.use_backup_entropy:
+ min_q = min_q - (self.temperature * next_log_probs)
+
+ td_target = rewards + (1 - done) * self.config.discount * min_q
+
+ # 3- compute predicted qs
+ if self.config.num_discrete_actions is not None:
+ # NOTE: We only want to keep the continuous action part
+ # In the buffer we have the full action space (continuous + discrete)
+ # We need to split them before concatenating them in the critic forward
+ actions: Tensor = actions[:, :DISCRETE_DIMENSION_INDEX]
+ q_preds = self.critic_forward(
+ observations=observations,
+ actions=actions,
+ use_target=False,
+ observation_features=observation_features,
+ )
+
+ # 4- Calculate loss
+ # Compute state-action value loss (TD loss) for all of the Q functions in the ensemble.
+ td_target_duplicate = einops.repeat(td_target, "b -> e b", e=q_preds.shape[0])
+        # Average the TD error over the batch for each critic, then sum across critics for the final loss
+ critics_loss = (
+ F.mse_loss(
+ input=q_preds,
+ target=td_target_duplicate,
+ reduction="none",
+ ).mean(dim=1)
+ ).sum()
+ return critics_loss
+
+ def compute_loss_discrete_critic(
+ self,
+ observations,
+ actions,
+ rewards,
+ next_observations,
+ done,
+ observation_features=None,
+ next_observation_features=None,
+ complementary_info=None,
+ ):
+ # NOTE: We only want to keep the discrete action part
+ # In the buffer we have the full action space (continuous + discrete)
+ # We need to split them before concatenating them in the critic forward
+ actions_discrete: Tensor = actions[:, DISCRETE_DIMENSION_INDEX:].clone()
+ actions_discrete = torch.round(actions_discrete)
+ actions_discrete = actions_discrete.long()
+
+ discrete_penalties: Tensor | None = None
+ if complementary_info is not None:
+ discrete_penalties: Tensor | None = complementary_info.get("discrete_penalty")
+
+ with torch.no_grad():
+            # Double-DQN-style update: select the next action with the online network, evaluate it with the target network
+ next_discrete_qs = self.discrete_critic_forward(
+ next_observations, use_target=False, observation_features=next_observation_features
+ )
+ best_next_discrete_action = torch.argmax(next_discrete_qs, dim=-1, keepdim=True)
+
+ # Get target Q-values from target network
+ target_next_discrete_qs = self.discrete_critic_forward(
+ observations=next_observations,
+ use_target=True,
+ observation_features=next_observation_features,
+ )
+
+ # Use gather to select Q-values for best actions
+ target_next_discrete_q = torch.gather(
+ target_next_discrete_qs, dim=1, index=best_next_discrete_action
+ ).squeeze(-1)
+
+ # Compute target Q-value with Bellman equation
+ rewards_discrete = rewards
+ if discrete_penalties is not None:
+ rewards_discrete = rewards + discrete_penalties
+ target_discrete_q = rewards_discrete + (1 - done) * self.config.discount * target_next_discrete_q
+
+ # Get predicted Q-values for current observations
+ predicted_discrete_qs = self.discrete_critic_forward(
+ observations=observations, use_target=False, observation_features=observation_features
+ )
+
+ # Use gather to select Q-values for taken actions
+ predicted_discrete_q = torch.gather(predicted_discrete_qs, dim=1, index=actions_discrete).squeeze(-1)
+
+ # Compute MSE loss between predicted and target Q-values
+ discrete_critic_loss = F.mse_loss(input=predicted_discrete_q, target=target_discrete_q)
+ return discrete_critic_loss
+
+ def compute_loss_temperature(self, observations, observation_features: Tensor | None = None) -> Tensor:
+ """Compute the temperature loss"""
+ # calculate temperature loss
+ with torch.no_grad():
+ _, log_probs, _ = self.actor(observations, observation_features)
+ temperature_loss = (-self.log_alpha.exp() * (log_probs + self.target_entropy)).mean()
+ return temperature_loss
+
+ def compute_loss_actor(
+ self,
+ observations,
+ observation_features: Tensor | None = None,
+ ) -> Tensor:
+ actions_pi, log_probs, _ = self.actor(observations, observation_features)
+
+ q_preds = self.critic_forward(
+ observations=observations,
+ actions=actions_pi,
+ use_target=False,
+ observation_features=observation_features,
+ )
+ min_q_preds = q_preds.min(dim=0)[0]
+
+ actor_loss = ((self.temperature * log_probs) - min_q_preds).mean()
+ return actor_loss
+
+ def _init_normalization(self, dataset_stats):
+ """Initialize input/output normalization modules."""
+ self.normalize_inputs = nn.Identity()
+ self.normalize_targets = nn.Identity()
+ if self.config.dataset_stats is not None:
+ params = _convert_normalization_params_to_tensor(self.config.dataset_stats)
+ self.normalize_inputs = NormalizeBuffer(
+ self.config.input_features, self.config.normalization_mapping, params
+ )
+ stats = dataset_stats or params
+ self.normalize_targets = NormalizeBuffer(
+ self.config.output_features, self.config.normalization_mapping, stats
+ )
+
+ def _init_encoders(self):
+ """Initialize shared or separate encoders for actor and critic."""
+ self.shared_encoder = self.config.shared_encoder
+ self.encoder_critic = SACObservationEncoder(self.config, self.normalize_inputs)
+ self.encoder_actor = (
+ self.encoder_critic
+ if self.shared_encoder
+ else SACObservationEncoder(self.config, self.normalize_inputs)
+ )
+
+ def _init_critics(self, continuous_action_dim):
+ """Build critic ensemble, targets, and optional discrete critic."""
+ heads = [
+ CriticHead(
+ input_dim=self.encoder_critic.output_dim + continuous_action_dim,
+ **asdict(self.config.critic_network_kwargs),
+ )
+ for _ in range(self.config.num_critics)
+ ]
+ self.critic_ensemble = CriticEnsemble(
+ encoder=self.encoder_critic, ensemble=heads, output_normalization=self.normalize_targets
+ )
+ target_heads = [
+ CriticHead(
+ input_dim=self.encoder_critic.output_dim + continuous_action_dim,
+ **asdict(self.config.critic_network_kwargs),
+ )
+ for _ in range(self.config.num_critics)
+ ]
+ self.critic_target = CriticEnsemble(
+ encoder=self.encoder_critic, ensemble=target_heads, output_normalization=self.normalize_targets
+ )
+ self.critic_target.load_state_dict(self.critic_ensemble.state_dict())
+
+ if self.config.use_torch_compile:
+ self.critic_ensemble = torch.compile(self.critic_ensemble)
+ self.critic_target = torch.compile(self.critic_target)
+
+ if self.config.num_discrete_actions is not None:
+ self._init_discrete_critics()
+
+ def _init_discrete_critics(self):
+        """Build the discrete critic and its target network."""
+ self.discrete_critic = DiscreteCritic(
+ encoder=self.encoder_critic,
+ input_dim=self.encoder_critic.output_dim,
+ output_dim=self.config.num_discrete_actions,
+ **asdict(self.config.discrete_critic_network_kwargs),
+ )
+ self.discrete_critic_target = DiscreteCritic(
+ encoder=self.encoder_critic,
+ input_dim=self.encoder_critic.output_dim,
+ output_dim=self.config.num_discrete_actions,
+ **asdict(self.config.discrete_critic_network_kwargs),
+ )
+
+ # TODO: (maractingi, azouitine) Compile the discrete critic
+ self.discrete_critic_target.load_state_dict(self.discrete_critic.state_dict())
+
+ def _init_actor(self, continuous_action_dim):
+ """Initialize policy actor network and default target entropy."""
+ # NOTE: The actor select only the continuous action part
+ self.actor = Policy(
+ encoder=self.encoder_actor,
+ network=MLP(input_dim=self.encoder_actor.output_dim, **asdict(self.config.actor_network_kwargs)),
+ action_dim=continuous_action_dim,
+ encoder_is_shared=self.shared_encoder,
+ **asdict(self.config.policy_kwargs),
+ )
+
+ self.target_entropy = self.config.target_entropy
+ if self.target_entropy is None:
+ dim = continuous_action_dim + (1 if self.config.num_discrete_actions is not None else 0)
+ self.target_entropy = -np.prod(dim) / 2
+
+ def _init_temperature(self):
+ """Set up temperature parameter and initial log_alpha."""
+ temp_init = self.config.temperature_init
+ self.log_alpha = nn.Parameter(torch.tensor([math.log(temp_init)]))
+ self.temperature = self.log_alpha.exp().item()
+
+
+class SACObservationEncoder(nn.Module):
+ """Encode image and/or state vector observations."""
+
+ def __init__(self, config: SACConfig, input_normalizer: nn.Module) -> None:
+ super().__init__()
+ self.config = config
+ self.input_normalization = input_normalizer
+ self._init_image_layers()
+ self._init_state_layers()
+ self._compute_output_dim()
+
+ def _init_image_layers(self) -> None:
+ self.image_keys = [k for k in self.config.input_features if is_image_feature(k)]
+ self.has_images = bool(self.image_keys)
+ if not self.has_images:
+ return
+
+ if self.config.vision_encoder_name is not None:
+ self.image_encoder = PretrainedImageEncoder(self.config)
+ else:
+ self.image_encoder = DefaultImageEncoder(self.config)
+
+ if self.config.freeze_vision_encoder:
+ freeze_image_encoder(self.image_encoder)
+
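+        # Probe the encoder with a dummy input to infer the spatial feature map shape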
+ dummy = torch.zeros(1, *self.config.input_features[self.image_keys[0]].shape)
+ with torch.no_grad():
+ _, channels, height, width = self.image_encoder(dummy).shape
+
+ self.spatial_embeddings = nn.ModuleDict()
+ self.post_encoders = nn.ModuleDict()
+
+ for key in self.image_keys:
+ name = key.replace(".", "_")
+ self.spatial_embeddings[name] = SpatialLearnedEmbeddings(
+ height=height,
+ width=width,
+ channel=channels,
+ num_features=self.config.image_embedding_pooling_dim,
+ )
+ self.post_encoders[name] = nn.Sequential(
+ nn.Dropout(0.1),
+ nn.Linear(
+ in_features=channels * self.config.image_embedding_pooling_dim,
+ out_features=self.config.latent_dim,
+ ),
+ nn.LayerNorm(normalized_shape=self.config.latent_dim),
+ nn.Tanh(),
+ )
+
+ def _init_state_layers(self) -> None:
+ self.has_env = "observation.environment_state" in self.config.input_features
+ self.has_state = "observation.state" in self.config.input_features
+ if self.has_env:
+ dim = self.config.input_features["observation.environment_state"].shape[0]
+ self.env_encoder = nn.Sequential(
+ nn.Linear(dim, self.config.latent_dim),
+ nn.LayerNorm(self.config.latent_dim),
+ nn.Tanh(),
+ )
+ if self.has_state:
+ dim = self.config.input_features["observation.state"].shape[0]
+ self.state_encoder = nn.Sequential(
+ nn.Linear(dim, self.config.latent_dim),
+ nn.LayerNorm(self.config.latent_dim),
+ nn.Tanh(),
+ )
+
+ def _compute_output_dim(self) -> None:
+ out = 0
+ if self.has_images:
+ out += len(self.image_keys) * self.config.latent_dim
+ if self.has_env:
+ out += self.config.latent_dim
+ if self.has_state:
+ out += self.config.latent_dim
+ self._out_dim = out
+
+ def forward(
+ self, obs: dict[str, Tensor], cache: dict[str, Tensor] | None = None, detach: bool = False
+ ) -> Tensor:
+ obs = self.input_normalization(obs)
+ parts = []
+ if self.has_images:
+ if cache is None:
+ cache = self.get_cached_image_features(obs, normalize=False)
+ parts.append(self._encode_images(cache, detach))
+ if self.has_env:
+ parts.append(self.env_encoder(obs["observation.environment_state"]))
+ if self.has_state:
+ parts.append(self.state_encoder(obs["observation.state"]))
+ if parts:
+ return torch.cat(parts, dim=-1)
+
+ raise ValueError(
+            "No observations to encode: provide at least one image, environment state, or state feature"
+ )
+
+ def get_cached_image_features(self, obs: dict[str, Tensor], normalize: bool = False) -> dict[str, Tensor]:
+ """Extract and optionally cache image features from observations.
+
+ This function processes image observations through the vision encoder once and returns
+ the resulting features.
+ When the image encoder is shared between actor and critics AND frozen, these features can be safely cached and
+ reused across policy components (actor, critic, discrete_critic), avoiding redundant forward passes.
+
+ Performance impact:
+ - The vision encoder forward pass is typically the main computational bottleneck during training and inference
+ - Caching these features can provide 2-4x speedup in training and inference
+
+ Normalization behavior:
+ - When called from inside forward(): set normalize=False since inputs are already normalized
+ - When called from outside forward(): set normalize=True to ensure proper input normalization
+
+ Usage patterns:
+ - Called in select_action() with normalize=True
+ - Called in learner.py's get_observation_features() to pre-compute features for all policy components
+ - Called internally by forward() with normalize=False
+
+ Args:
+ obs: Dictionary of observation tensors containing image keys
+ normalize: Whether to normalize observations before encoding
+ Set to True when calling directly from outside the encoder's forward method
+ Set to False when calling from within forward() where inputs are already normalized
+
+ Returns:
+ Dictionary mapping image keys to their corresponding encoded features
+ """
+ if normalize:
+ obs = self.input_normalization(obs)
+ batched = torch.cat([obs[k] for k in self.image_keys], dim=0)
+ out = self.image_encoder(batched)
+ chunks = torch.chunk(out, len(self.image_keys), dim=0)
+ return dict(zip(self.image_keys, chunks, strict=False))
+
+ def _encode_images(self, cache: dict[str, Tensor], detach: bool) -> Tensor:
+ """Encode image features from cached observations.
+
+ This function takes pre-encoded image features from the cache and applies spatial embeddings and post-encoders.
+ It also supports detaching the encoded features if specified.
+
+ Args:
+ cache (dict[str, Tensor]): The cached image features.
+ detach (bool): Usually when the encoder is shared between actor and critics,
+ we want to detach the encoded features on the policy side to avoid backprop through the encoder.
+ More detail here `https://cdn.aaai.org/ojs/17276/17276-13-20770-1-2-20210518.pdf`
+
+ Returns:
+ Tensor: The encoded image features.
+ """
+ feats = []
+ for k, feat in cache.items():
+ safe_key = k.replace(".", "_")
+ x = self.spatial_embeddings[safe_key](feat)
+ x = self.post_encoders[safe_key](x)
+ if detach:
+ x = x.detach()
+ feats.append(x)
+ return torch.cat(feats, dim=-1)
+
+ @property
+ def output_dim(self) -> int:
+ return self._out_dim
+
+
+class MLP(nn.Module):
+ """Multi-layer perceptron builder.
+
+ Dynamically constructs a sequence of layers based on `hidden_dims`:
+ 1) Linear (in_dim -> out_dim)
+ 2) Optional Dropout if `dropout_rate` > 0 and (not final layer or `activate_final`)
+ 3) LayerNorm on the output features
+ 4) Activation (standard for intermediate layers, `final_activation` for last layer if `activate_final`)
+
+ Arguments:
+ input_dim (int): Size of input feature dimension.
+ hidden_dims (list[int]): Sizes for each hidden layer.
+ activations (Callable or str): Activation to apply between layers.
+ activate_final (bool): Whether to apply activation at the final layer.
+ dropout_rate (Optional[float]): Dropout probability applied before normalization and activation.
+ final_activation (Optional[Callable or str]): Activation for the final layer when `activate_final` is True.
+
+ For each layer, `in_dim` is updated to the previous `out_dim`. All constructed modules are
+ stored in `self.net` as an `nn.Sequential` container.
+ """
+
+ def __init__(
+ self,
+ input_dim: int,
+ hidden_dims: list[int],
+ activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(),
+ activate_final: bool = False,
+ dropout_rate: float | None = None,
+ final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None,
+ ):
+ super().__init__()
+ layers: list[nn.Module] = []
+ in_dim = input_dim
+ total = len(hidden_dims)
+
+ for idx, out_dim in enumerate(hidden_dims):
+ # 1) linear transform
+ layers.append(nn.Linear(in_dim, out_dim))
+
+ is_last = idx == total - 1
+ # 2-4) optionally add dropout, normalization, and activation
+ if not is_last or activate_final:
+ if dropout_rate and dropout_rate > 0:
+ layers.append(nn.Dropout(p=dropout_rate))
+ layers.append(nn.LayerNorm(out_dim))
+ act_cls = final_activation if is_last and final_activation else activations
+ act = act_cls if isinstance(act_cls, nn.Module) else getattr(nn, act_cls)()
+ layers.append(act)
+
+ in_dim = out_dim
+
+ self.net = nn.Sequential(*layers)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ return self.net(x)
+
+
+class CriticHead(nn.Module):
+ def __init__(
+ self,
+ input_dim: int,
+ hidden_dims: list[int],
+ activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(),
+ activate_final: bool = False,
+ dropout_rate: float | None = None,
+ init_final: float | None = None,
+ final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None,
+ ):
+ super().__init__()
+ self.net = MLP(
+ input_dim=input_dim,
+ hidden_dims=hidden_dims,
+ activations=activations,
+ activate_final=activate_final,
+ dropout_rate=dropout_rate,
+ final_activation=final_activation,
+ )
+ self.output_layer = nn.Linear(in_features=hidden_dims[-1], out_features=1)
+ if init_final is not None:
+ nn.init.uniform_(self.output_layer.weight, -init_final, init_final)
+ nn.init.uniform_(self.output_layer.bias, -init_final, init_final)
+ else:
+ orthogonal_init()(self.output_layer.weight)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ return self.output_layer(self.net(x))
+
+
+class CriticEnsemble(nn.Module):
+ """
+ CriticEnsemble wraps multiple CriticHead modules into an ensemble.
+
+ Args:
+ encoder (SACObservationEncoder): encoder for observations.
+ ensemble (List[CriticHead]): list of critic heads.
+ output_normalization (nn.Module): normalization layer for actions.
+ init_final (float | None): optional initializer scale for final layers.
+
+ Forward returns a tensor of shape (num_critics, batch_size) containing Q-values.
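+
+ Example (illustrative): a typical SAC update takes the per-sample minimum over the
+ ensemble dimension of the returned tensor, e.g. `q_target = q_values.min(dim=0).values`.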
+ """
+
+ def __init__(
+ self,
+ encoder: SACObservationEncoder,
+ ensemble: list[CriticHead],
+ output_normalization: nn.Module,
+ init_final: float | None = None,
+ ):
+ super().__init__()
+ self.encoder = encoder
+ self.init_final = init_final
+ self.output_normalization = output_normalization
+ self.critics = nn.ModuleList(ensemble)
+
+ def forward(
+ self,
+ observations: dict[str, torch.Tensor],
+ actions: torch.Tensor,
+ observation_features: torch.Tensor | None = None,
+ ) -> torch.Tensor:
+ device = get_device_from_parameters(self)
+ # Move each tensor in observations to device
+ observations = {k: v.to(device) for k, v in observations.items()}
+ # NOTE: We normalize actions because it helps with sample efficiency.
+ actions: dict[str, torch.Tensor] = {"action": actions}
+ # NOTE: The normalization layer takes a dict as input and returns a dict, hence the wrapping/unwrapping.
+ actions = self.output_normalization(actions)["action"]
+ actions = actions.to(device)
+
+ obs_enc = self.encoder(observations, cache=observation_features)
+
+ inputs = torch.cat([obs_enc, actions], dim=-1)
+
+ # Loop through critics and collect outputs
+ q_values = []
+ for critic in self.critics:
+ q_values.append(critic(inputs))
+
+ # Stack outputs to match expected shape [num_critics, batch_size]
+ q_values = torch.stack([q.squeeze(-1) for q in q_values], dim=0)
+ return q_values
+
+
+class DiscreteCritic(nn.Module):
+ def __init__(
+ self,
+ encoder: nn.Module,
+ input_dim: int,
+ hidden_dims: list[int],
+ output_dim: int = 3,
+ activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(),
+ activate_final: bool = False,
+ dropout_rate: float | None = None,
+ init_final: float | None = None,
+ final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None,
+ ):
+ super().__init__()
+ self.encoder = encoder
+ self.output_dim = output_dim
+
+ self.net = MLP(
+ input_dim=input_dim,
+ hidden_dims=hidden_dims,
+ activations=activations,
+ activate_final=activate_final,
+ dropout_rate=dropout_rate,
+ final_activation=final_activation,
+ )
+
+ self.output_layer = nn.Linear(in_features=hidden_dims[-1], out_features=self.output_dim)
+ if init_final is not None:
+ nn.init.uniform_(self.output_layer.weight, -init_final, init_final)
+ nn.init.uniform_(self.output_layer.bias, -init_final, init_final)
+ else:
+ orthogonal_init()(self.output_layer.weight)
+
+ def forward(
+ self, observations: torch.Tensor, observation_features: torch.Tensor | None = None
+ ) -> torch.Tensor:
+ device = get_device_from_parameters(self)
+ observations = {k: v.to(device) for k, v in observations.items()}
+ obs_enc = self.encoder(observations, cache=observation_features)
+ return self.output_layer(self.net(obs_enc))
+
+
+class Policy(nn.Module):
+ def __init__(
+ self,
+ encoder: SACObservationEncoder,
+ network: nn.Module,
+ action_dim: int,
+ std_min: float = -5,
+ std_max: float = 2,
+ fixed_std: torch.Tensor | None = None,
+ init_final: float | None = None,
+ use_tanh_squash: bool = False,
+ encoder_is_shared: bool = False,
+ ):
+ super().__init__()
+ self.encoder: SACObservationEncoder = encoder
+ self.network = network
+ self.action_dim = action_dim
+ self.std_min = std_min
+ self.std_max = std_max
+ self.fixed_std = fixed_std
+ self.use_tanh_squash = use_tanh_squash
+ self.encoder_is_shared = encoder_is_shared
+
+ # Find the last Linear layer's output dimension
+ for layer in reversed(network.net):
+ if isinstance(layer, nn.Linear):
+ out_features = layer.out_features
+ break
+ # Mean layer
+ self.mean_layer = nn.Linear(out_features, action_dim)
+ if init_final is not None:
+ nn.init.uniform_(self.mean_layer.weight, -init_final, init_final)
+ nn.init.uniform_(self.mean_layer.bias, -init_final, init_final)
+ else:
+ orthogonal_init()(self.mean_layer.weight)
+
+ # Standard deviation layer or parameter
+ if fixed_std is None:
+ self.std_layer = nn.Linear(out_features, action_dim)
+ if init_final is not None:
+ nn.init.uniform_(self.std_layer.weight, -init_final, init_final)
+ nn.init.uniform_(self.std_layer.bias, -init_final, init_final)
+ else:
+ orthogonal_init()(self.std_layer.weight)
+
+ def forward(
+ self,
+ observations: torch.Tensor,
+ observation_features: torch.Tensor | None = None,
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ # We detach the encoder output if the encoder is shared, to avoid backpropagating through it.
+ # This prevents the encoder from being updated through the policy loss.
+ obs_enc = self.encoder(observations, cache=observation_features, detach=self.encoder_is_shared)
+
+ # Get network outputs
+ outputs = self.network(obs_enc)
+ means = self.mean_layer(outputs)
+
+ # Compute standard deviations
+ if self.fixed_std is None:
+ log_std = self.std_layer(outputs)
+ # Clamp in log space first (std_min/std_max are log-std bounds), then exponentiate, matching the JAX reference.
+ log_std = torch.clamp(log_std, self.std_min, self.std_max)
+ std = torch.exp(log_std)
+ else:
+ std = self.fixed_std.expand_as(means)
+
+ # Build transformed distribution
+ dist = TanhMultivariateNormalDiag(loc=means, scale_diag=std)
+
+ # Sample actions (reparameterized)
+ actions = dist.rsample()
+
+ # Compute log_probs
+ log_probs = dist.log_prob(actions)
+
+ return actions, log_probs, means
+
+ def get_features(self, observations: torch.Tensor) -> torch.Tensor:
+ """Get encoded features from observations"""
+ device = get_device_from_parameters(self)
+ observations = observations.to(device)
+ if self.encoder is not None:
+ with torch.inference_mode():
+ return self.encoder(observations)
+ return observations
+
+
+class DefaultImageEncoder(nn.Module):
+ def __init__(self, config: SACConfig):
+ super().__init__()
+ image_key = next(key for key in config.input_features if is_image_feature(key))
+ self.image_enc_layers = nn.Sequential(
+ nn.Conv2d(
+ in_channels=config.input_features[image_key].shape[0],
+ out_channels=config.image_encoder_hidden_dim,
+ kernel_size=7,
+ stride=2,
+ ),
+ nn.ReLU(),
+ nn.Conv2d(
+ in_channels=config.image_encoder_hidden_dim,
+ out_channels=config.image_encoder_hidden_dim,
+ kernel_size=5,
+ stride=2,
+ ),
+ nn.ReLU(),
+ nn.Conv2d(
+ in_channels=config.image_encoder_hidden_dim,
+ out_channels=config.image_encoder_hidden_dim,
+ kernel_size=3,
+ stride=2,
+ ),
+ nn.ReLU(),
+ nn.Conv2d(
+ in_channels=config.image_encoder_hidden_dim,
+ out_channels=config.image_encoder_hidden_dim,
+ kernel_size=3,
+ stride=2,
+ ),
+ nn.ReLU(),
+ )
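+ # Shape note (illustrative, input size is hypothetical): with a 128x128 input, the four
+ # stride-2, padding-0 convolutions shrink the spatial size as floor((H - K) / 2) + 1 per layer:
+ # 128 -> 61 -> 29 -> 14 -> 6, so the output is (B, image_encoder_hidden_dim, 6, 6).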
+
+ def forward(self, x):
+ x = self.image_enc_layers(x)
+ return x
+
+
+def freeze_image_encoder(image_encoder: nn.Module):
+ """Freeze all parameters in the encoder"""
+ for param in image_encoder.parameters():
+ param.requires_grad = False
+
+
+class PretrainedImageEncoder(nn.Module):
+ def __init__(self, config: SACConfig):
+ super().__init__()
+
+ self.image_enc_layers, self.image_enc_out_shape = self._load_pretrained_vision_encoder(config)
+
+ def _load_pretrained_vision_encoder(self, config: SACConfig):
+ """Set up CNN encoder"""
+ from transformers import AutoModel
+
+ self.image_enc_layers = AutoModel.from_pretrained(config.vision_encoder_name, trust_remote_code=True)
+
+ if hasattr(self.image_enc_layers.config, "hidden_sizes"):
+ self.image_enc_out_shape = self.image_enc_layers.config.hidden_sizes[-1] # Last channel dimension
+ elif hasattr(self.image_enc_layers, "fc"):
+ self.image_enc_out_shape = self.image_enc_layers.fc.in_features
+ else:
+ raise ValueError("Unsupported vision encoder architecture, make sure you are using a CNN")
+ return self.image_enc_layers, self.image_enc_out_shape
+
+ def forward(self, x):
+ enc_feat = self.image_enc_layers(x).last_hidden_state
+ return enc_feat
+
+
+def orthogonal_init():
+ return lambda x: torch.nn.init.orthogonal_(x, gain=1.0)
+
+
+class SpatialLearnedEmbeddings(nn.Module):
+ def __init__(self, height, width, channel, num_features=8):
+ """
+ PyTorch implementation of learned spatial embeddings
+
+ Args:
+ height: Spatial height of input features
+ width: Spatial width of input features
+ channel: Number of input channels
+ num_features: Number of output embedding dimensions
+ """
+ super().__init__()
+ self.height = height
+ self.width = width
+ self.channel = channel
+ self.num_features = num_features
+
+ self.kernel = nn.Parameter(torch.empty(channel, height, width, num_features))
+
+ nn.init.kaiming_normal_(self.kernel, mode="fan_in", nonlinearity="linear")
+
+ def forward(self, features):
+ """
+ Forward pass for spatial embedding
+
+ Args:
+ features: Input tensor of shape [B, C, H, W] where B is batch size,
+ C is number of channels, H is height, and W is width
+ Returns:
+ Output tensor of shape [B, C*F] where F is the number of features
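+
+ Example (illustrative, hypothetical sizes):
+ >>> emb = SpatialLearnedEmbeddings(height=4, width=4, channel=512, num_features=8)
+ >>> emb(torch.zeros(2, 512, 4, 4)).shape
+ torch.Size([2, 4096])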
+ """
+
+ features_expanded = features.unsqueeze(-1) # [B, C, H, W, 1]
+ kernel_expanded = self.kernel.unsqueeze(0) # [1, C, H, W, F]
+
+ # Element-wise multiplication and spatial reduction
+ output = (features_expanded * kernel_expanded).sum(dim=(2, 3)) # Sum over H,W dimensions
+
+ # Reshape to combine channel and feature dimensions
+ output = output.view(output.size(0), -1) # [B, C*F]
+
+ return output
+
+
+class RescaleFromTanh(Transform):
+ def __init__(self, low: float = -1, high: float = 1):
+ super().__init__()
+
+ self.low = low
+
+ self.high = high
+
+ def _call(self, x):
+ # Rescale from (-1, 1) to (low, high)
+
+ return 0.5 * (x + 1.0) * (self.high - self.low) + self.low
+
+ def _inverse(self, y):
+ # Rescale from (low, high) back to (-1, 1)
+
+ return 2.0 * (y - self.low) / (self.high - self.low) - 1.0
+
+ def log_abs_det_jacobian(self, x, y):
+ # log|d(rescale)/dx| = sum(log(0.5 * (high - low)))
+
+ scale = 0.5 * (self.high - self.low)
+
+ return torch.sum(torch.log(scale), dim=-1)
+
+
+class TanhMultivariateNormalDiag(TransformedDistribution):
+ def __init__(self, loc, scale_diag, low=None, high=None):
+ base_dist = MultivariateNormal(loc, torch.diag_embed(scale_diag))
+
+ transforms = [TanhTransform(cache_size=1)]
+
+ if low is not None and high is not None:
+ low = torch.as_tensor(low)
+
+ high = torch.as_tensor(high)
+
+ transforms.insert(0, RescaleFromTanh(low, high))
+
+ super().__init__(base_dist, transforms)
+
+ def mode(self):
+ # Mode is mean of base distribution, passed through transforms
+
+ x = self.base_dist.mean
+
+ for transform in self.transforms:
+ x = transform(x)
+
+ return x
+
+ def stddev(self):
+ std = self.base_dist.stddev
+
+ x = std
+
+ for transform in self.transforms:
+ x = transform(x)
+
+ return x
+
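+# Illustrative usage (hypothetical shapes):
+# dist = TanhMultivariateNormalDiag(loc=torch.zeros(2, 4), scale_diag=torch.ones(2, 4))
+# actions = dist.rsample()             # (2, 4), squashed into (-1, 1) by the tanh transform
+# log_probs = dist.log_prob(actions)   # (2,)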
+
+def _convert_normalization_params_to_tensor(normalization_params: dict) -> dict:
+ converted_params = {}
+ for outer_key, inner_dict in normalization_params.items():
+ converted_params[outer_key] = {}
+ for key, value in inner_dict.items():
+ converted_params[outer_key][key] = torch.tensor(value)
+ if "image" in outer_key:
+ converted_params[outer_key][key] = converted_params[outer_key][key].view(3, 1, 1)
+
+ return converted_params
diff --git a/src/lerobot/policies/sac/reward_model/configuration_classifier.py b/src/lerobot/policies/sac/reward_model/configuration_classifier.py
new file mode 100644
index 0000000000..fc53283b30
--- /dev/null
+++ b/src/lerobot/policies/sac/reward_model/configuration_classifier.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamWConfig, OptimizerConfig
+from lerobot.optim.schedulers import LRSchedulerConfig
+
+
+@PreTrainedConfig.register_subclass(name="reward_classifier")
+@dataclass
+class RewardClassifierConfig(PreTrainedConfig):
+ """Configuration for the Reward Classifier model."""
+
+ name: str = "reward_classifier"
+ num_classes: int = 2
+ hidden_dim: int = 256
+ latent_dim: int = 256
+ image_embedding_pooling_dim: int = 8
+ dropout_rate: float = 0.1
+ model_name: str = "helper2424/resnet10"
+ device: str = "cpu"
+ model_type: str = "cnn" # "transformer" or "cnn"
+ num_cameras: int = 2
+ learning_rate: float = 1e-4
+ weight_decay: float = 0.01
+ grad_clip_norm: float = 1.0
+ normalization_mapping: dict[str, NormalizationMode] = field(
+ default_factory=lambda: {
+ "VISUAL": NormalizationMode.MEAN_STD,
+ }
+ )
+
+ @property
+ def observation_delta_indices(self) -> list | None:
+ return None
+
+ @property
+ def action_delta_indices(self) -> list | None:
+ return None
+
+ @property
+ def reward_delta_indices(self) -> list | None:
+ return None
+
+ def get_optimizer_preset(self) -> OptimizerConfig:
+ return AdamWConfig(
+ lr=self.learning_rate,
+ weight_decay=self.weight_decay,
+ grad_clip_norm=self.grad_clip_norm,
+ )
+
+ def get_scheduler_preset(self) -> LRSchedulerConfig | None:
+ return None
+
+ def validate_features(self) -> None:
+ """Validate feature configurations."""
+ has_image = any(key.startswith("observation.image") for key in self.input_features)
+ if not has_image:
+ raise ValueError(
+ "You must provide an image observation (key starting with 'observation.image') in the input features"
+ )
diff --git a/src/lerobot/policies/sac/reward_model/modeling_classifier.py b/src/lerobot/policies/sac/reward_model/modeling_classifier.py
new file mode 100644
index 0000000000..cadd1c9f28
--- /dev/null
+++ b/src/lerobot/policies/sac/reward_model/modeling_classifier.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import torch
+from torch import Tensor, nn
+
+from lerobot.constants import OBS_IMAGE, REWARD
+from lerobot.policies.normalize import Normalize, Unnormalize
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
+
+
+class ClassifierOutput:
+ """Wrapper for classifier outputs with additional metadata."""
+
+ def __init__(
+ self,
+ logits: Tensor,
+ probabilities: Tensor | None = None,
+ hidden_states: Tensor | None = None,
+ ):
+ self.logits = logits
+ self.probabilities = probabilities
+ self.hidden_states = hidden_states
+
+ def __repr__(self):
+ return (
+ f"ClassifierOutput(logits={self.logits}, "
+ f"probabilities={self.probabilities}, "
+ f"hidden_states={self.hidden_states})"
+ )
+
+
+class SpatialLearnedEmbeddings(nn.Module):
+ def __init__(self, height, width, channel, num_features=8):
+ """
+ PyTorch implementation of learned spatial embeddings
+
+ Args:
+ height: Spatial height of input features
+ width: Spatial width of input features
+ channel: Number of input channels
+ num_features: Number of output embedding dimensions
+ """
+ super().__init__()
+ self.height = height
+ self.width = width
+ self.channel = channel
+ self.num_features = num_features
+
+ self.kernel = nn.Parameter(torch.empty(channel, height, width, num_features))
+
+ nn.init.kaiming_normal_(self.kernel, mode="fan_in", nonlinearity="linear")
+
+ def forward(self, features):
+ """
+ Forward pass for spatial embedding
+
+ Args:
+ features: Encoder output whose `last_hidden_state` has shape [B, C, H, W] ([C, H, W] if no batch)
+ Returns:
+ Output tensor of shape [B, C*F] or [C*F] if no batch
+ """
+
+ features = features.last_hidden_state
+
+ original_shape = features.shape
+ if features.dim() == 3:
+ features = features.unsqueeze(0) # Add batch dim
+
+ features_expanded = features.unsqueeze(-1) # [B, C, H, W, 1]
+ kernel_expanded = self.kernel.unsqueeze(0) # [1, C, H, W, F]
+
+ # Element-wise multiplication and spatial reduction
+ output = (features_expanded * kernel_expanded).sum(dim=(2, 3)) # Sum H,W
+
+ # Reshape to combine channel and feature dimensions
+ output = output.view(output.size(0), -1) # [B, C*F]
+
+ # Remove batch dim
+ if len(original_shape) == 3:
+ output = output.squeeze(0)
+
+ return output
+
+
+class Classifier(PreTrainedPolicy):
+ """Image classifier built on top of a pre-trained encoder."""
+
+ name = "reward_classifier"
+ config_class = RewardClassifierConfig
+
+ def __init__(
+ self,
+ config: RewardClassifierConfig,
+ dataset_stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ from transformers import AutoModel
+
+ super().__init__(config)
+ self.config = config
+
+ # Initialize normalization (standardized with the policy framework)
+ self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
+ self.normalize_targets = Normalize(
+ config.output_features, config.normalization_mapping, dataset_stats
+ )
+ self.unnormalize_outputs = Unnormalize(
+ config.output_features, config.normalization_mapping, dataset_stats
+ )
+
+ # Set up encoder
+ encoder = AutoModel.from_pretrained(self.config.model_name, trust_remote_code=True)
+ # Extract vision model if we're given a multimodal model
+ if hasattr(encoder, "vision_model"):
+ logging.info("Multimodal model detected - using vision encoder only")
+ self.encoder = encoder.vision_model
+ self.vision_config = encoder.config.vision_config
+ else:
+ self.encoder = encoder
+ self.vision_config = getattr(encoder, "config", None)
+
+ # Model type from config
+ self.is_cnn = self.config.model_type == "cnn"
+
+ # For CNNs, initialize backbone
+ if self.is_cnn:
+ self._setup_cnn_backbone()
+
+ self._freeze_encoder()
+
+ # Extract image keys from input_features
+ self.image_keys = [
+ key.replace(".", "_") for key in config.input_features if key.startswith(OBS_IMAGE)
+ ]
+
+ if self.is_cnn:
+ self.encoders = nn.ModuleDict()
+ for image_key in self.image_keys:
+ encoder = self._create_single_encoder()
+ self.encoders[image_key] = encoder
+
+ self._build_classifier_head()
+
+ def _setup_cnn_backbone(self):
+ """Set up CNN encoder"""
+ if hasattr(self.encoder, "fc"):
+ self.feature_dim = self.encoder.fc.in_features
+ self.encoder = nn.Sequential(*list(self.encoder.children())[:-1])
+ elif hasattr(self.encoder.config, "hidden_sizes"):
+ self.feature_dim = self.encoder.config.hidden_sizes[-1] # Last channel dimension
+ else:
+ raise ValueError("Unsupported CNN architecture")
+
+ def _freeze_encoder(self) -> None:
+ """Freeze the encoder parameters."""
+ for param in self.encoder.parameters():
+ param.requires_grad = False
+
+ def _create_single_encoder(self):
+ encoder = nn.Sequential(
+ self.encoder,
+ SpatialLearnedEmbeddings(
+ height=4,
+ width=4,
+ channel=self.feature_dim,
+ num_features=self.config.image_embedding_pooling_dim,
+ ),
+ nn.Dropout(self.config.dropout_rate),
+ nn.Linear(self.feature_dim * self.config.image_embedding_pooling_dim, self.config.latent_dim),
+ nn.LayerNorm(self.config.latent_dim),
+ nn.Tanh(),
+ )
+
+ return encoder
+
+ def _build_classifier_head(self) -> None:
+ """Initialize the classifier head architecture."""
+ # Get input dimension based on model type
+ if self.is_cnn:
+ input_dim = self.config.latent_dim
+ else: # Transformer models
+ if hasattr(self.encoder.config, "hidden_size"):
+ input_dim = self.encoder.config.hidden_size
+ else:
+ raise ValueError("Unsupported transformer architecture since hidden_size is not found")
+
+ self.classifier_head = nn.Sequential(
+ nn.Linear(input_dim * self.config.num_cameras, self.config.hidden_dim),
+ nn.Dropout(self.config.dropout_rate),
+ nn.LayerNorm(self.config.hidden_dim),
+ nn.ReLU(),
+ nn.Linear(
+ self.config.hidden_dim,
+ 1 if self.config.num_classes == 2 else self.config.num_classes,
+ ),
+ )
+
+ def _get_encoder_output(self, x: torch.Tensor, image_key: str) -> torch.Tensor:
+ """Extract the appropriate output from the encoder."""
+ with torch.no_grad():
+ if self.is_cnn:
+ # The HF ResNet applies pooling internally
+ outputs = self.encoders[image_key](x)
+ return outputs
+ else: # Transformer models
+ outputs = self.encoder(x)
+ return outputs.last_hidden_state[:, 0, :]
+
+ def extract_images_and_labels(self, batch: dict[str, Tensor]) -> tuple[list, Tensor]:
+ """Extract image tensors and label tensors from batch."""
+ # Collect image tensors whose keys start with OBS_IMAGE
+ images = [batch[key] for key in self.config.input_features if key.startswith(OBS_IMAGE)]
+ labels = batch[REWARD]
+
+ return images, labels
+
+ def predict(self, xs: list) -> ClassifierOutput:
+ """Forward pass of the classifier for inference."""
+ encoder_outputs = torch.hstack(
+ [self._get_encoder_output(x, img_key) for x, img_key in zip(xs, self.image_keys, strict=True)]
+ )
+ logits = self.classifier_head(encoder_outputs)
+
+ if self.config.num_classes == 2:
+ logits = logits.squeeze(-1)
+ probabilities = torch.sigmoid(logits)
+ else:
+ probabilities = torch.softmax(logits, dim=-1)
+
+ return ClassifierOutput(logits=logits, probabilities=probabilities, hidden_states=encoder_outputs)
+
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict[str, Tensor]]:
+ """Standard forward pass for training compatible with train.py."""
+ # Normalize inputs if needed
+ batch = self.normalize_inputs(batch)
+ batch = self.normalize_targets(batch)
+
+ # Extract images and labels
+ images, labels = self.extract_images_and_labels(batch)
+
+ # Get predictions
+ outputs = self.predict(images)
+
+ # Calculate loss
+ if self.config.num_classes == 2:
+ # Binary classification
+ loss = nn.functional.binary_cross_entropy_with_logits(outputs.logits, labels)
+ predictions = (torch.sigmoid(outputs.logits) > 0.5).float()
+ else:
+ # Multi-class classification
+ loss = nn.functional.cross_entropy(outputs.logits, labels.long())
+ predictions = torch.argmax(outputs.logits, dim=1)
+
+ # Calculate accuracy for logging
+ correct = (predictions == labels).sum().item()
+ total = labels.size(0)
+ accuracy = 100 * correct / total
+
+ # Return loss and metrics for logging
+ output_dict = {
+ "accuracy": accuracy,
+ "correct": correct,
+ "total": total,
+ }
+
+ return loss, output_dict
+
+ def predict_reward(self, batch, threshold=0.5):
+ """Eval method. Returns predicted reward with the decision threshold as argument."""
+ # Normalize the batch, then collect image tensors whose keys start with OBS_IMAGE
+ batch = self.normalize_inputs(batch)
+ batch = self.normalize_targets(batch)
+
+ # Extract images from batch dict
+ images = [batch[key] for key in self.config.input_features if key.startswith(OBS_IMAGE)]
+
+ if self.config.num_classes == 2:
+ probs = self.predict(images).probabilities
+ logging.debug(f"Predicted reward images: {probs}")
+ return (probs > threshold).float()
+ else:
+ return torch.argmax(self.predict(images).probabilities, dim=1)
+
+ def get_optim_params(self):
+ """Return optimizer parameters for the policy."""
+ return self.parameters()
+
+ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
+ """
+ This method is required by PreTrainedPolicy but not used for reward classifiers.
+ The reward classifier is not an actor and does not select actions.
+ """
+ raise NotImplementedError("Reward classifiers do not select actions")
+
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ """
+ This method is required by PreTrainedPolicy but not used for reward classifiers.
+ The reward classifier is not an actor and does not produce action chunks.
+ """
+ raise NotImplementedError("Reward classifiers do not predict action chunks")
+
+ def reset(self):
+ """
+ This method is required by PreTrainedPolicy but not used for reward classifiers.
+ The reward classifier is not an actor and does not select actions.
+ """
+ pass
diff --git a/src/lerobot/policies/smolvla/configuration_smolvla.py b/src/lerobot/policies/smolvla/configuration_smolvla.py
new file mode 100644
index 0000000000..571900c4a6
--- /dev/null
+++ b/src/lerobot/policies/smolvla/configuration_smolvla.py
@@ -0,0 +1,154 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
+from lerobot.optim.optimizers import AdamWConfig
+from lerobot.optim.schedulers import (
+ CosineDecayWithWarmupSchedulerConfig,
+)
+
+
+@PreTrainedConfig.register_subclass("smolvla")
+@dataclass
+class SmolVLAConfig(PreTrainedConfig):
+ # Input / output structure.
+ n_obs_steps: int = 1
+ chunk_size: int = 50
+ n_action_steps: int = 50
+
+ normalization_mapping: dict[str, NormalizationMode] = field(
+ default_factory=lambda: {
+ "VISUAL": NormalizationMode.IDENTITY,
+ "STATE": NormalizationMode.MEAN_STD,
+ "ACTION": NormalizationMode.MEAN_STD,
+ }
+ )
+
+ # Shorter state and action vectors will be padded
+ max_state_dim: int = 32
+ max_action_dim: int = 32
+
+ # Image preprocessing
+ resize_imgs_with_padding: tuple[int, int] = (512, 512)
+
+ # Add empty images. Used by smolvla_aloha_sim which adds the empty
+ # left and right wrist cameras in addition to the top camera.
+ empty_cameras: int = 0
+
+ # Converts the joint and gripper values from the standard Aloha space to
+ # the space used by the pi internal runtime which was used to train the base model.
+ adapt_to_pi_aloha: bool = False
+
+ # Converts joint dimensions to deltas with respect to the current state before passing to the model.
+ # Gripper dimensions will remain in absolute values.
+ use_delta_joint_actions_aloha: bool = False
+
+ # Tokenizer
+ tokenizer_max_length: int = 48
+
+ # Decoding
+ num_steps: int = 10
+
+ # Attention utils
+ use_cache: bool = True
+
+ # Finetuning settings
+ freeze_vision_encoder: bool = True
+ train_expert_only: bool = True
+ train_state_proj: bool = True
+
+ # Training presets
+ optimizer_lr: float = 1e-4
+ optimizer_betas: tuple[float, float] = (0.9, 0.95)
+ optimizer_eps: float = 1e-8
+ optimizer_weight_decay: float = 1e-10
+ optimizer_grad_clip_norm: float = 10
+
+ scheduler_warmup_steps: int = 1_000
+ scheduler_decay_steps: int = 30_000
+ scheduler_decay_lr: float = 2.5e-6
+
+ vlm_model_name: str = "HuggingFaceTB/SmolVLM2-500M-Video-Instruct" # Select the VLM backbone.
+ load_vlm_weights: bool = False # Set to True when training the expert from scratch so the VLM backbone weights are loaded; keep False when initializing from pretrained SmolVLA weights, which already include the VLM.
+
+ add_image_special_tokens: bool = False # Whether to use special image tokens around image features.
+
+ attention_mode: str = "cross_attn"
+
+ prefix_length: int = -1
+
+ pad_language_to: str = "longest" # Alternative: "max_length"
+
+ num_expert_layers: int = -1 # Values <= 0 (the default) give the action expert the same number of layers as the VLM; otherwise the expert has fewer layers.
+ num_vlm_layers: int = 16 # Number of layers used in the VLM (first num_vlm_layers layers)
+ self_attn_every_n_layers: int = 2 # Interleave a self-attention layer every self_attn_every_n_layers layers
+ expert_width_multiplier: float = 0.75 # The action expert hidden size, as a fraction of the VLM hidden size
+
+ min_period: float = 4e-3 # sensitivity range for the timestep used in sine-cosine positional encoding
+ max_period: float = 4.0
+
+ def __post_init__(self):
+ """Input validation (not exhaustive)."""
+ super().__post_init__()
+
+ if self.n_action_steps > self.chunk_size:
+ raise ValueError(
+ f"The chunk size is the upper bound for the number of action steps per model invocation. Got "
+ f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`."
+ )
+ if self.use_delta_joint_actions_aloha:
+ raise NotImplementedError(
+ "`use_delta_joint_actions_aloha` is used by smolvla for aloha real models. It is not ported yet in LeRobot."
+ )
+
+ def validate_features(self) -> None:
+ for i in range(self.empty_cameras):
+ key = f"observation.images.empty_camera_{i}"
+ empty_camera = PolicyFeature(
+ type=FeatureType.VISUAL,
+ shape=(3, 480, 640),
+ )
+ self.input_features[key] = empty_camera
+
+ def get_optimizer_preset(self) -> AdamWConfig:
+ return AdamWConfig(
+ lr=self.optimizer_lr,
+ betas=self.optimizer_betas,
+ eps=self.optimizer_eps,
+ weight_decay=self.optimizer_weight_decay,
+ grad_clip_norm=self.optimizer_grad_clip_norm,
+ )
+
+ def get_scheduler_preset(self):
+ return CosineDecayWithWarmupSchedulerConfig(
+ peak_lr=self.optimizer_lr,
+ decay_lr=self.scheduler_decay_lr,
+ num_warmup_steps=self.scheduler_warmup_steps,
+ num_decay_steps=self.scheduler_decay_steps,
+ )
+
+ @property
+ def observation_delta_indices(self) -> list:
+ return [0]
+
+ @property
+ def action_delta_indices(self) -> list:
+ return list(range(self.chunk_size))
+
+ @property
+ def reward_delta_indices(self) -> None:
+ return None
diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py
new file mode 100644
index 0000000000..a31e1b0783
--- /dev/null
+++ b/src/lerobot/policies/smolvla/modeling_smolvla.py
@@ -0,0 +1,941 @@
+#!/usr/bin/env python
+
+# Copyright 2025 HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+SmolVLA:
+
+[Paper](https://huggingface.co/papers/2506.01844)
+
+Designed by Hugging Face.
+
+Install smolvla extra dependencies:
+```bash
+pip install -e ".[smolvla]"
+```
+
+Example of finetuning the smolvla pretrained model (`smolvla_base`):
+```bash
+python -m lerobot.scripts.train \
+--policy.path=lerobot/smolvla_base \
+--dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \
+--batch_size=64 \
+--steps=200000
+```
+
+Example of finetuning SmolVLA, which is composed of a pretrained VLM and an action expert:
+```bash
+python -m lerobot.scripts.train \
+--policy.type=smolvla \
+--dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \
+--batch_size=64 \
+--steps=200000
+```
+
+Example of using the smolvla pretrained model outside LeRobot training framework:
+```python
+policy = SmolVLAPolicy.from_pretrained("lerobot/smolvla_base")
+```
+
+"""
+
+import math
+import os
+import re
+from collections import deque
+
+import safetensors
+import torch
+import torch.nn.functional as F # noqa: N812
+from torch import Tensor, nn
+from transformers import AutoProcessor
+
+from lerobot.constants import ACTION, OBS_STATE
+from lerobot.policies.normalize import (
+ Normalize,
+ Unnormalize,
+)
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig
+from lerobot.policies.smolvla.smolvlm_with_expert import SmolVLMWithExpertModel
+from lerobot.policies.utils import (
+ populate_queues,
+)
+from lerobot.utils.utils import get_safe_dtype
+
+# Matches ".soNNN", optionally followed by "-something", up to the "_buffer_" marker
+_VARIANT_RE = re.compile(r"\.so\d+(?:-[\w]+)?_buffer_")
+
+
+def canonicalise(k: str) -> str:
+ """
+ Remove dataset-variant markers like '.so100-blue_' or '.so100_' from a
+ normalisation-buffer key.
+ """
+ return _VARIANT_RE.sub(".buffer_", k)
+
+
+def standardise_state_dict(
+ checkpoint: dict[str, torch.Tensor], ref_keys: set[str], *, verbose: bool = True
+) -> tuple[dict[str, torch.Tensor], list[str]]:
+ """
+ • Re-keys `checkpoint` so that every entry matches the *reference* key set.
+ • If several variant keys collapse to the same canonical name we keep the
+ first one and log the collision.
+ • Returns the new dict + a list of entries that could not be matched.
+ """
+ out, collisions, unmatched = {}, {}, []
+
+ for k, v in checkpoint.items():
+ canon = canonicalise(k)
+ if canon in ref_keys:
+ if canon in out: # duplicate after collapsing
+ collisions.setdefault(canon, []).append(k)
+ else:
+ out[canon] = v
+ else:
+ unmatched.append(k)
+
+ if verbose:
+ for canon, variants in collisions.items():
+ print(f"[standardise_state_dict] '{canon}' ← {variants}")
+ if unmatched:
+ print(f"[standardise_state_dict] kept {len(unmatched)} unmatched keys")
+
+ out.update({k: checkpoint[k] for k in unmatched})
+ return out, unmatched
+
+
+def rename_checkpoint_keys(checkpoint: dict, rename_str: str):
+ """
+ Renames keys in a checkpoint dictionary based on the given rename string.
+
+ Args:
+ checkpoint (dict): The checkpoint dictionary.
+ rename_str (str): A string specifying key mappings in the format "old1//new1,old2//new2".
+
+ Returns:
+ dict: The modified checkpoint with renamed keys.
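+
+ Example (illustrative):
+ >>> rename_checkpoint_keys({"model._orig_mod.linear.weight": 0}, "model._orig_mod.//model.")
+ {'model.linear.weight': 0}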
+ """
+
+ rename_dict = dict(pair.split("//") for pair in rename_str.split(","))
+
+ new_checkpoint = {}
+ for k, v in checkpoint.items():
+ for old_key, new_key in rename_dict.items():
+ if old_key in k:
+ k = k.replace(old_key, new_key)
+ new_checkpoint[k] = v
+ return new_checkpoint
+
+
+def load_smolvla(
+ model: torch.nn.Module,
+ filename: str | os.PathLike,
+ *,
+ device: str = "cpu",
+ checkpoint_keys_mapping: str = "",
+) -> torch.nn.Module:
+ state_dict = safetensors.torch.load_file(filename, device=device)
+
+ # Optional user-supplied renames (e.g. "model._orig_mod.//model.")
+ if checkpoint_keys_mapping and "//" in checkpoint_keys_mapping:
+ state_dict = rename_checkpoint_keys(state_dict, checkpoint_keys_mapping)
+
+ state_dict, _ = standardise_state_dict(state_dict, set(model.state_dict().keys()))
+
+ # HACK(aliberts): to not overwrite normalization parameters as they should come from the dataset
+ norm_keys = ("normalize_inputs", "normalize_targets", "unnormalize_outputs")
+ state_dict = {k: v for k, v in state_dict.items() if not k.startswith(norm_keys)}
+
+ missing, unexpected = model.load_state_dict(state_dict, strict=False)
+
+ if not all(key.startswith(norm_keys) for key in missing) or unexpected:
+ raise RuntimeError(
+ f"SmolVLA load_state_dict: {len(missing)} missing / {len(unexpected)} unexpected keys"
+ )
+
+ return model
+
+
+def create_sinusoidal_pos_embedding(
+ time: torch.Tensor, dimension: int, min_period: float, max_period: float, device="cpu"
+) -> Tensor:
+ """Computes sine-cosine positional embedding vectors for scalar positions."""
+ if dimension % 2 != 0:
+ raise ValueError(f"dimension ({dimension}) must be divisible by 2")
+
+ if time.ndim != 1:
+ raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")
+
+ dtype = get_safe_dtype(torch.float64, device.type)
+ fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
+ period = min_period * (max_period / min_period) ** fraction
+
+ # Compute the outer product
+ scaling_factor = 1.0 / period * 2 * math.pi
+ sin_input = scaling_factor[None, :] * time[:, None]
+ pos_emb = torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
+ return pos_emb
+
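+# Example (illustrative): create_sinusoidal_pos_embedding(torch.tensor([0.0, 0.5]), dimension=8,
+# min_period=4e-3, max_period=4.0, device=torch.device("cpu")) returns a (2, 8) tensor whose first
+# four columns are sines and last four are cosines of the scaled timesteps.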
+
+def sample_beta(alpha, beta, bsize, device):
+ gamma1 = torch.empty((bsize,), device=device).uniform_(0, 1).pow(1 / alpha)
+ gamma2 = torch.empty((bsize,), device=device).uniform_(0, 1).pow(1 / beta)
+ return gamma1 / (gamma1 + gamma2)
+
+
+def make_att_2d_masks(pad_masks, att_masks):
+ """Copied from big_vision.
+
+ Tokens can attend to valid inputs tokens which have a cumulative mask_ar
+ smaller or equal to theirs. This way `mask_ar` int[B, N] can be used to
+ setup several types of attention, for example:
+
+ [[1 1 1 1 1 1]]: pure causal attention.
+
+ [[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
+ themselves and the last 3 tokens have a causal attention. The first
+ entry could also be a 1 without changing behaviour.
+
+ [[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
+ block can attend all previous blocks and all tokens on the same block.
+
+ Args:
+ pad_masks: bool[B, N], true if the token is part of the input, false if padding.
+ att_masks: int32[B, N] mask that is 1 where previous tokens cannot depend on
+ the token and 0 where the token shares the same attention mask as the previous token.
+ """
+ if att_masks.ndim != 2:
+ raise ValueError(att_masks.ndim)
+ if pad_masks.ndim != 2:
+ raise ValueError(pad_masks.ndim)
+
+ cumsum = torch.cumsum(att_masks, dim=1)
+ att_2d_masks = cumsum[:, None, :] <= cumsum[:, :, None]
+ pad_2d_masks = pad_masks[:, None, :] * pad_masks[:, :, None]
+ att_2d_masks = att_2d_masks & pad_2d_masks
+ return att_2d_masks
+
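+# Worked example (illustrative): with pad_masks = [[1, 1, 1]] and att_masks = [[0, 0, 1]],
+# cumsum is [[0, 0, 1]], so the two prefix tokens attend only to each other while the third
+# token attends to all three.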
+
+def resize_with_pad(img, width, height, pad_value=-1):
+ # Assumes this is a no-op when the image already matches the target width and height
+ if img.ndim != 4:
+ raise ValueError(f"(b,c,h,w) expected, but {img.shape}")
+
+ cur_height, cur_width = img.shape[2:]
+
+ ratio = max(cur_width / width, cur_height / height)
+ resized_height = int(cur_height / ratio)
+ resized_width = int(cur_width / ratio)
+ resized_img = F.interpolate(
+ img, size=(resized_height, resized_width), mode="bilinear", align_corners=False
+ )
+
+ pad_height = max(0, int(height - resized_height))
+ pad_width = max(0, int(width - resized_width))
+
+ # pad on left and top of image
+ padded_img = F.pad(resized_img, (pad_width, 0, pad_height, 0), value=pad_value)
+ return padded_img
+
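+# Example (illustrative): a (b, 3, 480, 640) batch resized to width=512, height=512 keeps the
+# aspect ratio (ratio = 1.25, giving 384x512) and is then padded with 128 rows on top to reach 512x512.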
+
+def pad_vector(vector, new_dim):
+ """Can be (batch_size x sequence_length x features_dimension)
+ or (batch_size x features_dimension)
+ """
+ if vector.shape[-1] == new_dim:
+ return vector
+ shape = list(vector.shape)
+ current_dim = shape[-1]
+ shape[-1] = new_dim
+ new_vector = torch.zeros(*shape, dtype=vector.dtype, device=vector.device)
+ new_vector[..., :current_dim] = vector
+ return new_vector
+
+
+def normalize(x, min_val, max_val):
+ return (x - min_val) / (max_val - min_val)
+
+
+def unnormalize(x, min_val, max_val):
+ return x * (max_val - min_val) + min_val
+
+
+def safe_arcsin(value):
+ # This ensures that the input stays within
+ # [−1,1] to avoid invalid values for arcsin
+ return torch.arcsin(torch.clamp(value, -1.0, 1.0))
+
+
+def aloha_gripper_to_angular(value):
+ # Aloha transforms the gripper positions into a linear space. The following code
+ # reverses this transformation to be consistent with smolvla which is pretrained in
+ # angular space.
+ #
+ # These values are coming from the Aloha code:
+ # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED
+ value = unnormalize(value, min_val=0.01844, max_val=0.05800)
+
+ # This is the inverse of the angular to linear transformation inside the Interbotix code.
+ def linear_to_radian(linear_position, arm_length, horn_radius):
+ value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position)
+ return safe_arcsin(value)
+
+ # The constants are taken from the Interbotix code.
+ value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022)
+
+ # Normalize to [0, 1].
+ # The values 0.4 and 1.5 were measured on an actual Trossen robot.
+ return normalize(value, min_val=0.4, max_val=1.5)
+
+
+def aloha_gripper_from_angular(value):
+ # Convert from the gripper position used by smolvla to the gripper position that is used by Aloha.
+ # Note that the units are still angular but the range is different.
+
+ # The values 0.4 and 1.5 were measured on an actual Trossen robot.
+ value = unnormalize(value, min_val=0.4, max_val=1.5)
+
+ # These values are coming from the Aloha code:
+ # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE
+ return normalize(value, min_val=-0.6213, max_val=1.4910)
+
+
+def aloha_gripper_from_angular_inv(value):
+ # Directly inverts the gripper_from_angular function.
+ value = unnormalize(value, min_val=-0.6213, max_val=1.4910)
+ return normalize(value, min_val=0.4, max_val=1.5)
+
+
+class SmolVLAPolicy(PreTrainedPolicy):
+ """Wrapper class around VLAFlowMatching model to train and run inference within LeRobot."""
+
+ config_class = SmolVLAConfig
+ name = "smolvla"
+
+ def __init__(
+ self,
+ config: SmolVLAConfig,
+ dataset_stats: dict[str, dict[str, Tensor]] | None = None,
+ ):
+ """
+ Args:
+ config: Policy configuration class instance or None, in which case the default instantiation of
+ the configuration class is used.
+ dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
+ that they will be passed with a call to `load_state_dict` before the policy is used.
+ """
+
+ super().__init__(config)
+ config.validate_features()
+ self.config = config
+ self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
+ self.normalize_targets = Normalize(
+ config.output_features, config.normalization_mapping, dataset_stats
+ )
+ self.unnormalize_outputs = Unnormalize(
+ config.output_features, config.normalization_mapping, dataset_stats
+ )
+
+ self.language_tokenizer = AutoProcessor.from_pretrained(self.config.vlm_model_name).tokenizer
+ self.model = VLAFlowMatching(config)
+ self.reset()
+
+ def reset(self):
+ """This should be called whenever the environment is reset."""
+ self._queues = {
+ ACTION: deque(maxlen=self.config.n_action_steps),
+ }
+
+ # HACK(aliberts, danaaubakirova): we overwrite this classmethod here to fix smolVLA-specific issues
+ @classmethod
+ def _load_as_safetensor(
+ cls,
+ model: "SmolVLAPolicy",
+ model_file: str,
+ map_location: str,
+ strict: bool,
+ ):
+ safetensors.torch.load_model(model, model_file, strict=strict, device=map_location)
+ return load_smolvla(
+ model,
+ model_file,
+ device=map_location,
+ checkpoint_keys_mapping="model._orig_mod.//model.",
+ )
+
+ def get_optim_params(self) -> dict:
+ return self.parameters()
+
+ def _get_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+ for k in batch:
+ if k in self._queues:
+ batch[k] = torch.stack(list(self._queues[k]), dim=1)
+
+ images, img_masks = self.prepare_images(batch)
+ state = self.prepare_state(batch)
+ lang_tokens, lang_masks = self.prepare_language(batch)
+
+ actions = self.model.sample_actions(images, img_masks, lang_tokens, lang_masks, state, noise=noise)
+
+ # Unpad actions
+ original_action_dim = self.config.action_feature.shape[0]
+ actions = actions[:, :, :original_action_dim]
+
+ actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
+
+ if self.config.adapt_to_pi_aloha:
+ actions = self._pi_aloha_encode_actions(actions)
+
+ return actions
+
+ def _prepare_batch(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+ if self.config.adapt_to_pi_aloha:
+ batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
+
+ batch = self.normalize_inputs(batch)
+
+ return batch
+
+ @torch.no_grad()
+ def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+ self.eval()
+
+ batch = self._prepare_batch(batch)
+ self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION])
+
+ actions = self._get_action_chunk(batch, noise)
+ return actions
+
+ @torch.no_grad()
+ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+ """Select a single action given environment observations.
+
+ This method caches a chunk of actions and returns one action at a time for execution in the
+ environment. It works by managing the actions in a queue and only querying the model (via
+ `_get_action_chunk`) when the queue is empty.
+ """
+ self.eval()
+ batch = self._prepare_batch(batch)
+ self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION])
+
+ # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
+ # querying the policy.
+ if len(self._queues[ACTION]) == 0:
+ actions = self._get_action_chunk(batch, noise)
+
+ # `self.predict_action_chunk` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
+ # effectively has shape (n_action_steps, batch_size, *), hence the transpose.
+ self._queues[ACTION].extend(actions.transpose(0, 1)[: self.config.n_action_steps])
+
+ return self._queues[ACTION].popleft()
+
+ def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> dict[str, Tensor]:
+ """Do a full training forward pass to compute the loss"""
+ if self.config.adapt_to_pi_aloha:
+ batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
+ batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])
+ batch = self.normalize_inputs(batch)
+ batch = self.normalize_targets(batch)
+ images, img_masks = self.prepare_images(batch)
+ state = self.prepare_state(batch)
+ lang_tokens, lang_masks = self.prepare_language(batch)
+ actions = self.prepare_action(batch)
+ actions_is_pad = batch.get("action_is_pad")
+ loss_dict = {}
+ losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
+ loss_dict["losses_after_forward"] = losses.clone()
+
+ if actions_is_pad is not None:
+ in_episode_bound = ~actions_is_pad
+ losses = losses * in_episode_bound.unsqueeze(-1)
+ loss_dict["losses_after_in_ep_bound"] = losses.clone()
+
+ # Remove padding
+ losses = losses[:, :, : self.config.max_action_dim]
+ loss_dict["losses_after_rm_padding"] = losses.clone()
+
+ # For backward pass
+ loss = losses.mean()
+ # For logging
+ loss_dict["loss"] = loss.item()
+ return loss, loss_dict
+
+ def prepare_images(self, batch):
+ """Apply SmolVLA preprocessing to the images, like resizing to 224x224 and padding to keep aspect ratio, and
+ convert pixel range from [0.0, 1.0] to [-1.0, 1.0] as requested by SigLIP.
+ """
+ images = []
+ img_masks = []
+ present_img_keys = [key for key in self.config.image_features if key in batch]
+ missing_img_keys = [key for key in self.config.image_features if key not in batch]
+
+ if len(present_img_keys) == 0:
+ raise ValueError(
+ f"All image features are missing from the batch. At least one expected. (batch: {batch.keys()}) (image_features:{self.config.image_features})"
+ )
+ # Preprocess image features present in the batch
+ for key in present_img_keys:
+ img = batch[key][:, -1, :, :, :] if batch[key].ndim == 5 else batch[key]
+ if self.config.resize_imgs_with_padding is not None:
+ img = resize_with_pad(img, *self.config.resize_imgs_with_padding, pad_value=0)
+
+ # Normalize from range [0,1] to [-1,1] as expected by SigLIP
+ img = img * 2.0 - 1.0
+
+ bsize = img.shape[0]
+ device = img.device
+ if f"{key}_padding_mask" in batch:
+ mask = batch[f"{key}_padding_mask"].bool()
+ else:
+ mask = torch.ones(bsize, dtype=torch.bool, device=device)
+ images.append(img)
+ img_masks.append(mask)
+
+ # Create image features not present in the batch
+ # as fully 0 padded images.
+ for num_empty_cameras in range(len(missing_img_keys)):
+ if num_empty_cameras >= self.config.empty_cameras:
+ break
+ img = torch.ones_like(img) * -1
+ mask = torch.zeros_like(mask)
+ images.append(img)
+ img_masks.append(mask)
+ return images, img_masks
+
+ def prepare_language(self, batch) -> tuple[Tensor, Tensor]:
+ """Tokenize the text input"""
+ device = batch[OBS_STATE].device
+ tasks = batch["task"]
+ if isinstance(tasks, str):
+ tasks = [tasks]
+
+ if len(tasks) == 1:
+ tasks = [tasks[0] for _ in range(batch[OBS_STATE].shape[0])]
+
+ tasks = [task if task.endswith("\n") else f"{task}\n" for task in tasks]
+
+ tokenized_prompt = self.language_tokenizer(
+ tasks,
+ padding=self.config.pad_language_to,
+ padding_side="right",
+ max_length=self.config.tokenizer_max_length,
+ return_tensors="pt",
+ )
+ lang_tokens = tokenized_prompt["input_ids"].to(device=device)
+ lang_masks = tokenized_prompt["attention_mask"].to(device=device, dtype=torch.bool)
+
+ return lang_tokens, lang_masks
+
+ def _pi_aloha_decode_state(self, state):
+ # Flip the joints.
+ for motor_idx in [1, 2, 8, 9]:
+ state[:, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ state[:, motor_idx] = aloha_gripper_to_angular(state[:, motor_idx])
+ return state
+
+ def _pi_aloha_encode_actions(self, actions):
+ # Flip the joints.
+ for motor_idx in [1, 2, 8, 9]:
+ actions[:, :, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ actions[:, :, motor_idx] = aloha_gripper_from_angular(actions[:, :, motor_idx])
+ return actions
+
+ def _pi_aloha_encode_actions_inv(self, actions):
+ # Flip the joints again.
+ for motor_idx in [1, 2, 8, 9]:
+ actions[:, :, motor_idx] *= -1
+ # Reverse the gripper transformation that is being applied by the Aloha runtime.
+ for motor_idx in [6, 13]:
+ actions[:, :, motor_idx] = aloha_gripper_from_angular_inv(actions[:, :, motor_idx])
+ return actions
+
+ def prepare_state(self, batch):
+ """Pad state"""
+ state = batch[OBS_STATE][:, -1, :] if batch[OBS_STATE].ndim > 2 else batch[OBS_STATE]
+ state = pad_vector(state, self.config.max_state_dim)
+ return state
+
+ def prepare_action(self, batch):
+ """Pad action"""
+ actions = pad_vector(batch[ACTION], self.config.max_action_dim)
+ return actions
+
+
+def pad_tensor(tensor, max_len, pad_value=0):
+ """
+ Efficiently pads a tensor along sequence dimension to match max_len.
+
+ Args:
+ tensor (torch.Tensor): Shape (B, L, ...) or (B, L).
+ max_len (int): Fixed sequence length.
+ pad_value (int/float): Value for padding.
+
+ Returns:
+ torch.Tensor: Shape (B, max_len, ...) or (B, max_len).
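+
+ Example (illustrative):
+ >>> pad_tensor(torch.ones(2, 3), max_len=5).shape
+ torch.Size([2, 5])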
+ """
+ b, d = tensor.shape[:2]
+
+ # Create a padded tensor of max_len and copy the existing values
+ padded_tensor = torch.full(
+ (b, max_len, *tensor.shape[2:]), pad_value, dtype=tensor.dtype, device=tensor.device
+ )
+ padded_tensor[:, :d] = tensor # Efficient in-place copy
+
+ return padded_tensor
+
+
+class VLAFlowMatching(nn.Module):
+ """
+ SmolVLA
+
+ [Paper](https://huggingface.co/papers/2506.01844)
+
+ Designed by Hugging Face.
+ ┌──────────────────────────────┐
+ │ actions │
+ │ ▲ │
+ │ ┌─────────┐ ┌─|────┐ │
+ │ | │────► │ │ │
+ │ | │ kv │ │ │
+ │ | │────► │Action│ │
+ │ | VLM │cache │Expert│ |
+ │ │ │────► | │ │
+ │ │ │ │ │ │
+ │ └▲──▲───▲─┘ └───▲──┘ |
+ │ │ | | │ |
+ │ | | | noise │
+ │ │ │ state │
+ │ │ language tokens │
+ │ image(s) │
+ └──────────────────────────────┘
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ self.vlm_with_expert = SmolVLMWithExpertModel(
+ model_id=self.config.vlm_model_name,
+ freeze_vision_encoder=self.config.freeze_vision_encoder,
+ train_expert_only=self.config.train_expert_only,
+ load_vlm_weights=self.config.load_vlm_weights,
+ attention_mode=self.config.attention_mode,
+ num_expert_layers=self.config.num_expert_layers,
+ num_vlm_layers=self.config.num_vlm_layers,
+ self_attn_every_n_layers=self.config.self_attn_every_n_layers,
+ expert_width_multiplier=self.config.expert_width_multiplier,
+ )
+ self.state_proj = nn.Linear(
+ self.config.max_state_dim, self.vlm_with_expert.config.text_config.hidden_size
+ )
+ self.action_in_proj = nn.Linear(self.config.max_action_dim, self.vlm_with_expert.expert_hidden_size)
+ self.action_out_proj = nn.Linear(self.vlm_with_expert.expert_hidden_size, self.config.max_action_dim)
+
+ self.action_time_mlp_in = nn.Linear(
+ self.vlm_with_expert.expert_hidden_size * 2, self.vlm_with_expert.expert_hidden_size
+ )
+ self.action_time_mlp_out = nn.Linear(
+ self.vlm_with_expert.expert_hidden_size, self.vlm_with_expert.expert_hidden_size
+ )
+
+ self.set_requires_grad()
+ self.fake_image_token = self.vlm_with_expert.processor.tokenizer.fake_image_token_id
+ self.global_image_token = self.vlm_with_expert.processor.tokenizer.global_image_token_id
+ self.global_image_start_token = torch.tensor(
+ [self.fake_image_token, self.global_image_token], dtype=torch.long
+ )
+
+ self.add_image_special_tokens = self.config.add_image_special_tokens
+ self.image_end_token = torch.tensor([self.fake_image_token], dtype=torch.long)
+ self.prefix_length = self.config.prefix_length
+
+ def set_requires_grad(self):
+ for params in self.state_proj.parameters():
+ params.requires_grad = self.config.train_state_proj
+
+ def sample_noise(self, shape, device):
+ noise = torch.normal(
+ mean=0.0,
+ std=1.0,
+ size=shape,
+ dtype=torch.float32,
+ device=device,
+ )
+ return noise
+
+ def sample_time(self, bsize, device):
+ time_beta = sample_beta(1.5, 1.0, bsize, device)
+ time = time_beta * 0.999 + 0.001
+ return time.to(dtype=torch.float32, device=device)
+
+ def embed_prefix(
+ self, images, img_masks, lang_tokens, lang_masks, state: torch.Tensor = None
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Embed images with SigLIP and language tokens with embedding layer to prepare
+ for SmolVLM transformer processing.
+ """
+ embs = []
+ pad_masks = []
+ att_masks = []
+ for _img_idx, (
+ img,
+ img_mask,
+ ) in enumerate(zip(images, img_masks, strict=False)):
+ if self.add_image_special_tokens:
+ image_start_token = (
+ self.vlm_with_expert.embed_language_tokens(
+ self.global_image_start_token.to(device=self.vlm_with_expert.vlm.device)
+ )
+ .unsqueeze(0)
+ .expand(img.shape[0], -1, -1)
+ )
+ image_start_mask = torch.ones_like(
+ image_start_token[:, :, 0], dtype=torch.bool, device=image_start_token.device
+ )
+ att_masks += [0] * (image_start_mask.shape[-1])
+ embs.append(image_start_token)
+ pad_masks.append(image_start_mask)
+
+ img_emb = self.vlm_with_expert.embed_image(img)
+
+ # Scale image embeddings by the square root of their hidden dimension
+ img_emb_dim = img_emb.shape[-1]
+ img_emb = img_emb * torch.tensor(img_emb_dim**0.5, dtype=img_emb.dtype, device=img_emb.device)
+
+ bsize, num_img_embs = img_emb.shape[:2]
+ img_mask = img_mask[:, None].expand(bsize, num_img_embs)
+
+ embs.append(img_emb)
+ pad_masks.append(img_mask)
+
+ att_masks += [0] * (num_img_embs)
+ if self.add_image_special_tokens:
+ image_end_token = (
+ self.vlm_with_expert.embed_language_tokens(
+ self.image_end_token.to(device=self.vlm_with_expert.vlm.device)
+ )
+ .unsqueeze(0)
+ .expand(img.shape[0], -1, -1)
+ )
+ image_end_mask = torch.ones_like(
+ image_end_token[:, :, 0], dtype=torch.bool, device=image_end_token.device
+ )
+ embs.append(image_end_token)
+ pad_masks.append(image_end_mask)
+ att_masks += [0] * (image_end_mask.shape[1])
+ lang_emb = self.vlm_with_expert.embed_language_tokens(lang_tokens)
+ # Scale language embeddings by the square root of their hidden dimension
+ lang_emb_dim = lang_emb.shape[-1]
+ lang_emb = lang_emb * math.sqrt(lang_emb_dim)
+
+ embs.append(lang_emb)
+ pad_masks.append(lang_masks)
+
+ num_lang_embs = lang_emb.shape[1]
+ att_masks += [0] * num_lang_embs
+
+ state_emb = self.state_proj(state)
+ state_emb = state_emb[:, None, :] if state_emb.ndim == 2 else state_emb
+ embs.append(state_emb)
+ bsize = state_emb.shape[0]
+ device = state_emb.device
+
+ states_seq_len = state_emb.shape[1]
+ state_mask = torch.ones(bsize, states_seq_len, dtype=torch.bool, device=device)
+ pad_masks.append(state_mask)
+
+ # Set attention masks so that image and language inputs do not attend to state or actions
+ att_masks += [1] * (states_seq_len)
+ embs = torch.cat(embs, dim=1)
+ pad_masks = torch.cat(pad_masks, dim=1)
+ att_masks = torch.tensor(att_masks, dtype=torch.bool, device=pad_masks.device)
+ att_masks = att_masks[None, :]
+
+ seq_len = pad_masks.shape[1]
+ if seq_len < self.prefix_length:
+ embs = pad_tensor(embs, self.prefix_length, pad_value=0)
+ pad_masks = pad_tensor(pad_masks, self.prefix_length, pad_value=0)
+ att_masks = pad_tensor(att_masks, self.prefix_length, pad_value=0)
+
+ att_masks = att_masks.expand(bsize, -1)
+
+ return embs, pad_masks, att_masks
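+ # Prefix layout: per camera [<img start>] image tokens [<img end>], then language
+ # tokens, then the projected state. att_masks is 0 over image/language tokens
+ # (bidirectional attention within the prefix) and 1 at the state token, which starts
+ # a new attention block so earlier prefix tokens cannot attend to it.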
+
+ def embed_suffix(self, noisy_actions, timestep):
+ """Embed state, noisy_actions, timestep to prepare for Expert Gemma processing."""
+ embs = []
+ pad_masks = []
+ att_masks = []
+
+ # Fuse timestep + action information using an MLP
+ action_emb = self.action_in_proj(noisy_actions)
+ device = action_emb.device
+ bsize = action_emb.shape[0]
+ dtype = action_emb.dtype
+ # Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
+ time_emb = create_sinusoidal_pos_embedding(
+ timestep,
+ self.vlm_with_expert.expert_hidden_size,
+ self.config.min_period,
+ self.config.max_period,
+ device=device,
+ )
+ time_emb = time_emb.type(dtype=dtype)
+
+ time_emb = time_emb[:, None, :].expand_as(action_emb)
+ action_time_emb = torch.cat([action_emb, time_emb], dim=2)
+
+ action_time_emb = self.action_time_mlp_in(action_time_emb)
+ action_time_emb = F.silu(action_time_emb) # swish == silu
+ action_time_emb = self.action_time_mlp_out(action_time_emb)
+
+ # Add to input tokens
+ embs.append(action_time_emb)
+
+ bsize, action_time_dim = action_time_emb.shape[:2]
+ action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=device)
+ pad_masks.append(action_time_mask)
+
+ # Set attention masks so that image, language and state inputs do not attend to action tokens
+ att_masks += [1] * self.config.chunk_size
+ embs = torch.cat(embs, dim=1)
+ pad_masks = torch.cat(pad_masks, dim=1)
+ att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
+ att_masks = att_masks[None, :].expand(bsize, len(att_masks))
+ return embs, pad_masks, att_masks
+
+ def forward(
+ self, images, img_masks, lang_tokens, lang_masks, state, actions, noise=None, time=None
+ ) -> Tensor:
+ """Do a full training forward pass and compute the loss (batch_size x num_steps x num_motors)"""
+ if noise is None:
+ noise = self.sample_noise(actions.shape, actions.device)
+
+ if time is None:
+ time = self.sample_time(actions.shape[0], actions.device)
+
+ time_expanded = time[:, None, None]
+ x_t = time_expanded * noise + (1 - time_expanded) * actions
+ u_t = noise - actions
+ prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
+ images, img_masks, lang_tokens, lang_masks, state=state
+ )
+ suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(x_t, time)
+
+ pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
+ att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)
+
+ att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
+ position_ids = torch.cumsum(pad_masks, dim=1) - 1
+ (_, suffix_out), _ = self.vlm_with_expert.forward(
+ attention_mask=att_2d_masks,
+ position_ids=position_ids,
+ past_key_values=None,
+ inputs_embeds=[prefix_embs, suffix_embs],
+ use_cache=False,
+ fill_kv_cache=False,
+ )
+ suffix_out = suffix_out[:, -self.config.chunk_size :]
+ # Original openpi code, upcast attention output
+ suffix_out = suffix_out.to(dtype=torch.float32)
+ v_t = self.action_out_proj(suffix_out)
+ losses = F.mse_loss(u_t, v_t, reduction="none")
+ return losses
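+ # Flow-matching summary: x_t = t * noise + (1 - t) * actions interpolates between
+ # clean actions (t=0) and pure noise (t=1); the model regresses the constant velocity
+ # u_t = noise - actions, and the element-wise MSE above is returned unreduced so the
+ # caller can mask/average it as needed.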
+
+ def sample_actions(self, images, img_masks, lang_tokens, lang_masks, state, noise=None) -> Tensor:
+ """Do a full inference forward and compute the action (batch_size x num_steps x num_motors)"""
+ bsize = state.shape[0]
+ device = state.device
+
+ if noise is None:
+ actions_shape = (bsize, self.config.chunk_size, self.config.max_action_dim)
+ noise = self.sample_noise(actions_shape, device)
+
+ prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
+ images, img_masks, lang_tokens, lang_masks, state=state
+ )
+ prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
+ prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1
+ # Compute image and language key value cache
+ _, past_key_values = self.vlm_with_expert.forward(
+ attention_mask=prefix_att_2d_masks,
+ position_ids=prefix_position_ids,
+ past_key_values=None,
+ inputs_embeds=[prefix_embs, None],
+ use_cache=self.config.use_cache,
+ fill_kv_cache=True,
+ )
+ dt = -1.0 / self.config.num_steps
+ dt = torch.tensor(dt, dtype=torch.float32, device=device)
+
+ x_t = noise
+ time = torch.tensor(1.0, dtype=torch.float32, device=device)
+ while time >= -dt / 2:
+ expanded_time = time.expand(bsize)
+ v_t = self.denoise_step(
+ prefix_pad_masks,
+ past_key_values,
+ x_t,
+ expanded_time,
+ )
+ # Euler step
+ x_t += dt * v_t
+ time += dt
+ return x_t
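+ # Inference integrates the learned ODE from t=1 (noise) down to t~0 with num_steps
+ # Euler steps of size dt = -1/num_steps, reusing the prefix KV cache computed once
+ # above (when use_cache is enabled) so only the action expert runs at every step.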
+
+ def denoise_step(
+ self,
+ prefix_pad_masks,
+ past_key_values,
+ x_t,
+ timestep,
+ ):
+ """Apply one denoising step of the noise `x_t` at a given timestep."""
+ suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(x_t, timestep)
+
+ suffix_len = suffix_pad_masks.shape[1]
+ batch_size = prefix_pad_masks.shape[0]
+ prefix_len = prefix_pad_masks.shape[1]
+ prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)
+
+ suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)
+
+ full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)
+ prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
+ position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1
+
+ outputs_embeds, _ = self.vlm_with_expert.forward(
+ attention_mask=full_att_2d_masks,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=[None, suffix_embs],
+ use_cache=self.config.use_cache,
+ fill_kv_cache=False,
+ )
+ suffix_out = outputs_embeds[1]
+ suffix_out = suffix_out[:, -self.config.chunk_size :]
+ suffix_out = suffix_out.to(dtype=torch.float32)
+ v_t = self.action_out_proj(suffix_out)
+ return v_t
diff --git a/src/lerobot/policies/smolvla/smolvlm_with_expert.py b/src/lerobot/policies/smolvla/smolvlm_with_expert.py
new file mode 100644
index 0000000000..f3d1a693a2
--- /dev/null
+++ b/src/lerobot/policies/smolvla/smolvlm_with_expert.py
@@ -0,0 +1,549 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+import torch
+from torch import nn
+from transformers import (
+ AutoConfig,
+ AutoModel,
+ AutoModelForImageTextToText,
+ AutoProcessor,
+ SmolVLMForConditionalGeneration,
+)
+
+
+def apply_rope(x, positions, max_wavelength=10_000):
+ """
+ Applies RoPE positions [B, L] to x [B, L, H, D].
+ """
+ d_half = x.shape[-1] // 2
+ device = x.device
+ dtype = x.dtype
+ x = x.to(torch.float32)
+
+ freq_exponents = (2.0 / x.shape[-1]) * torch.arange(d_half, dtype=torch.float32, device=device)
+ timescale = max_wavelength**freq_exponents
+ radians = positions[..., None].to(torch.float32) / timescale[None, None, :].to(torch.float32)
+
+ radians = radians[..., None, :]
+
+ sin = torch.sin(radians) # .to(dtype=dtype)
+ cos = torch.cos(radians) # .to(dtype=dtype)
+
+ x1, x2 = x.split(d_half, dim=-1)
+ res = torch.empty_like(x)
+ res[..., :d_half] = x1 * cos - x2 * sin
+ res[..., d_half:] = x2 * cos + x1 * sin
+
+ return res.to(dtype)
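+ # Illustrative usage (hypothetical shapes): for queries [B, L, H, D] and integer
+ # positions [B, L],
+ # q = torch.randn(2, 10, 8, 64)
+ # pos = torch.arange(10).expand(2, -1)
+ # q_rot = apply_rope(q, pos) # same shape and dtype as q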
+
+
+def get_intermediate_size(hidden_dim, ffn_dim_multiplier=4, multiple_of=256):
+ hidden_dim = int(2 * hidden_dim / 3)
+ hidden_dim = int(ffn_dim_multiplier * hidden_dim)
+ hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
+ return hidden_dim
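+ # LLaMA-style SwiGLU sizing: take 2/3 of the given dim, multiply by
+ # ffn_dim_multiplier (default 4), then round up to a multiple of `multiple_of`.
+ # E.g., assuming a VLM hidden size of 960 and expert_width_multiplier=0.5, the expert
+ # hidden size is 480 and get_intermediate_size(480) == 1280.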
+
+
+class SmolVLMWithExpertModel(nn.Module):
+ def __init__(
+ self,
+ model_id: str = "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
+ load_vlm_weights: bool = True,
+ train_expert_only: bool = True,
+ freeze_vision_encoder: bool = False,
+ attention_mode: str = "self_attn",
+ num_expert_layers: int = -1,
+ num_vlm_layers: int = -1,
+ self_attn_every_n_layers: int = -1,
+ expert_width_multiplier: float = 0.5,
+ ):
+ super().__init__()
+ if load_vlm_weights:
+ print(f"Loading {model_id} weights ...")
+ self.vlm = AutoModelForImageTextToText.from_pretrained(
+ model_id,
+ device_map="auto",
+ torch_dtype="bfloat16",
+ low_cpu_mem_usage=True,
+ )
+ config = self.vlm.config
+ else:
+ config = AutoConfig.from_pretrained(model_id)
+ self.vlm = SmolVLMForConditionalGeneration(config=config)
+ self.processor = AutoProcessor.from_pretrained(model_id)
+ if num_vlm_layers > 0:
+ print(f"Reducing the number of VLM layers to {num_vlm_layers} ...")
+ self.get_vlm_model().text_model.layers = self.get_vlm_model().text_model.layers[:num_vlm_layers]
+ self.num_vlm_layers = len(self.get_vlm_model().text_model.layers)
+ self.config = config
+ # Smaller lm expert
+ lm_expert_config = copy.deepcopy(config.text_config)
+ hidden_size = lm_expert_config.hidden_size
+ lm_expert_config.hidden_size = int(hidden_size * expert_width_multiplier) # hidden_size // 2
+ lm_expert_config.intermediate_size = get_intermediate_size(int(hidden_size * expert_width_multiplier))
+ lm_expert_config.num_hidden_layers = self.num_vlm_layers
+ if num_expert_layers > 0:
+ assert len(self.get_vlm_model().text_model.layers) % num_expert_layers == 0, (
+ f"Number of layers in the VLM {len(self.get_vlm_model().text_model.layers)} are not multiple of num_expert_layers {num_expert_layers}"
+ )
+ lm_expert_config.num_hidden_layers = num_expert_layers
+ self.lm_expert = AutoModel.from_config(lm_expert_config)
+
+ self.num_expert_layers = len(self.lm_expert.layers)
+ self.self_attn_every_n_layers = self_attn_every_n_layers
+ if "cross" in attention_mode:
+ # Reshape qkv projections to have the same input dimension as the vlm
+ for layer_idx in range(len(self.lm_expert.layers)):
+ if self.self_attn_every_n_layers > 0 and layer_idx % self.self_attn_every_n_layers == 0:
+ continue
+ self.lm_expert.layers[layer_idx].self_attn.k_proj = nn.Linear(
+ config.text_config.num_key_value_heads * config.text_config.head_dim,
+ lm_expert_config.num_key_value_heads * lm_expert_config.head_dim,
+ bias=lm_expert_config.attention_bias,
+ )
+ self.lm_expert.layers[layer_idx].self_attn.v_proj = nn.Linear(
+ config.text_config.num_key_value_heads * config.text_config.head_dim,
+ lm_expert_config.num_key_value_heads * lm_expert_config.head_dim,
+ bias=lm_expert_config.attention_bias,
+ )
+ # Remove unused embed_tokens
+ self.lm_expert.embed_tokens = None
+
+ self.num_attention_heads = self.config.text_config.num_attention_heads
+ self.num_key_value_heads = self.config.text_config.num_key_value_heads
+
+ self.freeze_vision_encoder = freeze_vision_encoder
+ self.train_expert_only = train_expert_only
+ self.attention_mode = attention_mode
+ self.expert_hidden_size = lm_expert_config.hidden_size
+ self.set_requires_grad()
+
+ def get_vlm_model(self):
+ return self.vlm.model
+
+ def set_requires_grad(self):
+ if self.freeze_vision_encoder:
+ self.get_vlm_model().vision_model.eval()
+ for params in self.get_vlm_model().vision_model.parameters():
+ params.requires_grad = False
+ if self.train_expert_only:
+ self.vlm.eval()
+ for params in self.vlm.parameters():
+ params.requires_grad = False
+ else:
+ # To avoid unused params issue with distributed training
+ last_layers = [self.num_vlm_layers - 1]
+ if (
+ self.num_vlm_layers != self.num_expert_layers
+ and self.num_vlm_layers % self.num_expert_layers == 0
+ ):
+ last_layers.append(self.num_vlm_layers - 2)
+ frozen_layers = [
+ "lm_head",
+ "text_model.model.norm.weight",
+ ]
+ for layer in last_layers:
+ frozen_layers.append(f"text_model.model.layers.{layer}.")
+
+ for name, params in self.vlm.named_parameters():
+ if any(k in name for k in frozen_layers):
+ params.requires_grad = False
+ # To avoid unused params issue with distributed training
+ for name, params in self.lm_expert.named_parameters():
+ if "lm_head" in name:
+ params.requires_grad = False
+
+ def train(self, mode: bool = True):
+ super().train(mode)
+
+ if self.freeze_vision_encoder:
+ self.get_vlm_model().vision_model.eval()
+
+ if self.train_expert_only:
+ self.vlm.eval()
+
+ def embed_image(self, image: torch.Tensor):
+ patch_attention_mask = None
+ # Get sequence from the vision encoder
+ image_hidden_states = (
+ self.get_vlm_model()
+ .vision_model(
+ pixel_values=image.to(dtype=self.get_vlm_model().vision_model.dtype),
+ patch_attention_mask=patch_attention_mask,
+ )
+ .last_hidden_state
+ )
+ # Modality projection & resampling
+ image_hidden_states = self.get_vlm_model().connector(image_hidden_states)
+ return image_hidden_states
+
+ def embed_language_tokens(self, tokens: torch.Tensor):
+ return self.get_vlm_model().text_model.get_input_embeddings()(tokens)
+
+ def forward_attn_layer(
+ self,
+ model_layers,
+ inputs_embeds,
+ layer_idx,
+ position_ids,
+ attention_mask,
+ batch_size,
+ head_dim,
+ use_cache: bool = True,
+ fill_kv_cache: bool = True,
+ past_key_values=None,
+ ) -> list[torch.Tensor]:
+ query_states = []
+ key_states = []
+ value_states = []
+ for i, hidden_states in enumerate(inputs_embeds):
+ layer = model_layers[i][layer_idx]
+ if hidden_states is None or layer is None:
+ continue
+ hidden_states = layer.input_layernorm(hidden_states)
+
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, layer.self_attn.head_dim)
+
+ hidden_states = hidden_states.to(dtype=layer.self_attn.q_proj.weight.dtype)
+ query_state = layer.self_attn.q_proj(hidden_states).view(hidden_shape)
+ key_state = layer.self_attn.k_proj(hidden_states).view(hidden_shape)
+ value_state = layer.self_attn.v_proj(hidden_states).view(hidden_shape)
+
+ query_states.append(query_state)
+ key_states.append(key_state)
+ value_states.append(value_state)
+
+ # B,L,H,D with L sequence length, H number of heads, D head dim
+ # concatenate on the number of embeddings/tokens
+ query_states = torch.cat(query_states, dim=1)
+ key_states = torch.cat(key_states, dim=1)
+ value_states = torch.cat(value_states, dim=1)
+ seq_len = query_states.shape[1]
+ if seq_len < position_ids.shape[1]:
+ _position_ids = position_ids[:, :seq_len]
+ _attention_mask = attention_mask[:, :seq_len, :seq_len]
+ else:
+ _position_ids = position_ids
+ _attention_mask = attention_mask
+
+ attention_mask_ = _attention_mask
+ position_ids_ = _position_ids
+
+ query_states = apply_rope(query_states, position_ids_)
+ key_states = apply_rope(key_states, position_ids_)
+
+ if use_cache and past_key_values is None:
+ past_key_values = {}
+
+ if use_cache:
+ if fill_kv_cache:
+ past_key_values[layer_idx] = {
+ "key_states": key_states,
+ "value_states": value_states,
+ }
+ else:
+ # TODO here, some optimization can be done - similar to a `StaticCache` we can declare the `max_len` before.
+ # so we create an empty cache, with just one cuda malloc, and if (in autoregressive case) we reach
+ # the max len, then we (for instance) double the cache size. This implementation already exists
+ # in `transformers`. (molbap)
+ key_states = torch.cat([past_key_values[layer_idx]["key_states"], key_states], dim=1)
+ value_states = torch.cat([past_key_values[layer_idx]["value_states"], value_states], dim=1)
+
+ attention_interface = self.get_attention_interface()
+
+ att_output = attention_interface(
+ attention_mask_, batch_size, head_dim, query_states, key_states, value_states
+ )
+ return [att_output], past_key_values
+
+ def forward_cross_attn_layer(
+ self,
+ model_layers,
+ inputs_embeds,
+ layer_idx,
+ position_ids,
+ attention_mask,
+ batch_size,
+ head_dim,
+ use_cache: bool = True,
+ fill_kv_cache: bool = True,
+ past_key_values=None,
+ ) -> list[torch.Tensor]:
+ attention_interface = self.get_attention_interface()
+
+ att_outputs = []
+ assert len(inputs_embeds) == 2 or (use_cache and past_key_values is not None and not fill_kv_cache), (
+ f"Both len(inputs_embeds) == {len(inputs_embeds)} and past_key_values is {past_key_values}"
+ )
+
+ if len(inputs_embeds) == 2 and not past_key_values:
+ # Prefix attention
+ seq_len = inputs_embeds[0].shape[1]
+ position_id, expert_position_id = position_ids[:, :seq_len], position_ids[:, seq_len:]
+ prefix_attention_mask = attention_mask[:, :seq_len, :seq_len]
+
+ layer = model_layers[0][layer_idx]
+
+ hidden_states = layer.input_layernorm(inputs_embeds[0])
+
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, layer.self_attn.head_dim)
+
+ hidden_states = hidden_states.to(dtype=layer.self_attn.q_proj.weight.dtype)
+ query_state = layer.self_attn.q_proj(hidden_states).view(hidden_shape)
+ key_state = layer.self_attn.k_proj(hidden_states).view(hidden_shape)
+ value_states = layer.self_attn.v_proj(hidden_states).view(hidden_shape)
+
+ # B,L,H,D with L sequence length, H number of heads, D head dim
+ query_states = apply_rope(query_state, position_id)
+ key_states = apply_rope(key_state, position_id)
+
+ att_output = attention_interface(
+ prefix_attention_mask, batch_size, head_dim, query_states, key_states, value_states
+ )
+ att_outputs.append(att_output)
+ else:
+ expert_position_id = position_ids
+
+ if use_cache and past_key_values is None:
+ past_key_values = {}
+
+ if use_cache:
+ if fill_kv_cache:
+ past_key_values[layer_idx] = {
+ "key_states": key_states,
+ "value_states": value_states,
+ }
+ else:
+ # TODO here, some optimization can be done - similar to a `StaticCache` we can declare the `max_len` before.
+ # so we create an empty cache, with just one cuda malloc, and if (in autoregressive case) we reach
+ # the max len, then we (for instance) double the cache size. This implementation already exists
+ # in `transformers`. (molbap)
+ key_states = past_key_values[layer_idx]["key_states"]
+ value_states = past_key_values[layer_idx]["value_states"]
+
+ # Expert
+ expert_layer = model_layers[1][layer_idx]
+ if expert_layer is not None:
+ expert_hidden_states = expert_layer.input_layernorm(inputs_embeds[1])
+
+ expert_input_shape = expert_hidden_states.shape[:-1]
+ expert_hidden_shape = (*expert_input_shape, -1, expert_layer.self_attn.head_dim)
+
+ expert_hidden_states = expert_hidden_states.to(dtype=expert_layer.self_attn.q_proj.weight.dtype)
+ expert_query_state = expert_layer.self_attn.q_proj(expert_hidden_states).view(expert_hidden_shape)
+
+ _key_states = key_states.to(dtype=expert_layer.self_attn.k_proj.weight.dtype).view(
+ *key_states.shape[:2], -1
+ )
+ expert_key_states = expert_layer.self_attn.k_proj(_key_states).view(
+ *_key_states.shape[:-1], -1, expert_layer.self_attn.head_dim
+ ) # k_proj should have same dim as kv
+
+ _value_states = value_states.to(dtype=expert_layer.self_attn.v_proj.weight.dtype).view(
+ *value_states.shape[:2], -1
+ )
+ expert_value_states = expert_layer.self_attn.v_proj(_value_states).view(
+ *_value_states.shape[:-1], -1, expert_layer.self_attn.head_dim
+ )
+
+ expert_position_id = (
+ expert_position_id - torch.min(expert_position_id, dim=1, keepdim=True).values
+ ) # start from 0
+ expert_attention_mask = attention_mask[
+ :, -inputs_embeds[1].shape[1] :, : expert_key_states.shape[1]
+ ] # take into account kv
+
+ expert_query_states = apply_rope(expert_query_state, expert_position_id)
+
+ att_output = attention_interface(
+ expert_attention_mask,
+ batch_size,
+ head_dim,
+ expert_query_states,
+ expert_key_states,
+ expert_value_states,
+ )
+ att_outputs.append(att_output)
+ else:
+ att_outputs.append(None)
+
+ # att_output = att_output.to(dtype=models[i].dtype)
+ return att_outputs, past_key_values
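+ # In the cross-attention path the expert never builds its own keys/values from
+ # scratch: the cached (already RoPE'd) prefix keys/values from the VLM are
+ # re-projected through the expert's resized k_proj/v_proj, and the action-token
+ # queries attend to them with positions restarted from 0.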
+
+ def get_model_layers(self, models: list) -> list:
+ vlm_layers = []
+ expert_layers = []
+ multiple_of = self.num_vlm_layers // self.num_expert_layers
+ for i in range(self.num_vlm_layers):
+ if multiple_of > 0 and i > 0 and i % multiple_of != 0:
+ expert_layer = None
+ else:
+ expert_layer_index = i // multiple_of if multiple_of > 0 else i
+ expert_layer = models[1].layers[expert_layer_index]
+ vlm_layers.append(models[0].layers[i])
+ expert_layers.append(expert_layer)
+ return [vlm_layers, expert_layers]
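+ # Illustrative pairing (hypothetical sizes): with num_vlm_layers=32 and
+ # num_expert_layers=8, multiple_of=4, so expert layer j is paired with VLM layers
+ # 0, 4, 8, ..., 28 and the in-between VLM layers get None on the expert side.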
+
+ def forward(
+ self,
+ attention_mask: torch.Tensor | None = None,
+ position_ids: torch.LongTensor | None = None,
+ past_key_values: list[torch.FloatTensor] | None = None,
+ inputs_embeds: list[torch.FloatTensor] = None,
+ use_cache: bool | None = None,
+ fill_kv_cache: bool | None = None,
+ ):
+ models = [self.get_vlm_model().text_model, self.lm_expert]
+ model_layers = self.get_model_layers(models)
+ for hidden_states in inputs_embeds:
+ # TODO: this loop is inefficient; dtype and batch size are identical across the
+ # entries, and only the device could differ in multi-GPU edge cases.
+ if hidden_states is None:
+ continue
+ batch_size = hidden_states.shape[0]
+
+ # RMSNorm
+ num_layers = self.num_vlm_layers
+ head_dim = self.vlm.config.text_config.head_dim
+ for layer_idx in range(num_layers):
+ if (
+ fill_kv_cache
+ or "cross" not in self.attention_mode
+ or (self.self_attn_every_n_layers > 0 and layer_idx % self.self_attn_every_n_layers == 0)
+ ):
+ att_outputs, past_key_values = self.forward_attn_layer(
+ model_layers,
+ inputs_embeds,
+ layer_idx,
+ position_ids,
+ attention_mask,
+ batch_size,
+ head_dim,
+ use_cache=use_cache,
+ fill_kv_cache=fill_kv_cache,
+ past_key_values=past_key_values,
+ )
+ else:
+ att_outputs, past_key_values = self.forward_cross_attn_layer(
+ model_layers,
+ inputs_embeds,
+ layer_idx,
+ position_ids,
+ attention_mask,
+ batch_size,
+ head_dim,
+ use_cache=use_cache,
+ fill_kv_cache=fill_kv_cache,
+ past_key_values=past_key_values,
+ )
+ outputs_embeds = []
+ start = 0
+ for i, hidden_states in enumerate(inputs_embeds):
+ layer = model_layers[i][layer_idx]
+ att_output = (
+ att_outputs[i] if i < len(att_outputs) else att_outputs[0]
+ ) # in case of self_attn
+ if hidden_states is not None:
+ if layer is None:
+ outputs_embeds.append(hidden_states)
+ continue
+ end = start + hidden_states.shape[1]
+
+ if att_output.dtype != layer.self_attn.o_proj.weight.dtype:
+ att_output = att_output.to(layer.self_attn.o_proj.weight.dtype)
+ att_out = att_output[:, start:end]
+ out_emb = layer.self_attn.o_proj(att_out)
+
+ out_emb += hidden_states
+ after_first_residual = out_emb.clone()
+
+ out_emb = layer.post_attention_layernorm(out_emb)
+ out_emb = layer.mlp(out_emb)
+
+ out_emb += after_first_residual
+
+ outputs_embeds.append(out_emb)
+
+ start = end if len(att_outputs) == 1 else 0
+ else:
+ outputs_embeds.append(None)
+
+ inputs_embeds = outputs_embeds
+
+ # final norm
+ outputs_embeds = []
+ for i, hidden_states in enumerate(inputs_embeds):
+ if hidden_states is not None:
+ out_emb = models[i].norm(hidden_states)
+ outputs_embeds.append(out_emb)
+ else:
+ outputs_embeds.append(None)
+ return outputs_embeds, past_key_values
+
+ def get_attention_interface(self):
+ attention_interface = self.eager_attention_forward
+ return attention_interface
+
+ def eager_attention_forward(
+ self, attention_mask, batch_size, head_dim, query_states, key_states, value_states
+ ):
+ num_att_heads = self.num_attention_heads
+ num_key_value_heads = self.num_key_value_heads
+ num_key_value_groups = num_att_heads // num_key_value_heads
+
+ sequence_length = key_states.shape[1]
+
+ key_states = key_states[:, :, :, None, :].expand(
+ batch_size, sequence_length, num_key_value_heads, num_key_value_groups, head_dim
+ )
+ key_states = key_states.reshape(
+ batch_size, sequence_length, num_key_value_heads * num_key_value_groups, head_dim
+ )
+
+ value_states = value_states[:, :, :, None, :].expand(
+ batch_size, sequence_length, num_key_value_heads, num_key_value_groups, head_dim
+ )
+ value_states = value_states.reshape(
+ batch_size, sequence_length, num_key_value_heads * num_key_value_groups, head_dim
+ )
+
+ # Attention here is upcasted to float32 to match the original eager implementation.
+ query_states = query_states.to(dtype=torch.float32)
+ key_states = key_states.to(dtype=torch.float32)
+
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+
+ att_weights = torch.matmul(query_states, key_states.transpose(2, 3))
+ att_weights *= head_dim**-0.5
+
+ att_weights = att_weights.to(dtype=torch.float32)
+ big_neg = torch.finfo(att_weights.dtype).min # -2.3819763e38 # See gemma/modules.py
+ masked_att_weights = torch.where(attention_mask[:, None, :, :], att_weights, big_neg)
+ probs = nn.functional.softmax(masked_att_weights, dim=-1)
+ probs = probs.to(dtype=value_states.dtype)
+
+ att_output = torch.matmul(probs, value_states.permute(0, 2, 1, 3))
+
+ att_output = att_output.permute(0, 2, 1, 3)
+ # we use -1 because sequence length can change
+ att_output = att_output.reshape(batch_size, -1, num_key_value_heads * num_key_value_groups * head_dim)
+
+ return att_output
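+ # This is a plain grouped-query attention implementation: each KV head is repeated
+ # num_attention_heads // num_key_value_heads times to match the query heads, scores
+ # are computed in float32 and masked with the dtype's most negative value before the
+ # softmax, and the output is reshaped to (B, L, H * D) before the caller's o_proj.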
diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/src/lerobot/policies/tdmpc/configuration_tdmpc.py
similarity index 84%
rename from lerobot/common/policies/tdmpc/configuration_tdmpc.py
rename to src/lerobot/policies/tdmpc/configuration_tdmpc.py
index 4a5415a156..3c1a29932b 100644
--- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py
+++ b/src/lerobot/policies/tdmpc/configuration_tdmpc.py
@@ -16,9 +16,14 @@
# limitations under the License.
from dataclasses import dataclass, field
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamConfig
+
+@PreTrainedConfig.register_subclass("tdmpc")
@dataclass
-class TDMPCConfig:
+class TDMPCConfig(PreTrainedConfig):
"""Configuration class for TDMPCPolicy.
Defaults are configured for training with xarm_lift_medium_replay providing proprioceptive and single
@@ -71,7 +76,7 @@ class TDMPCConfig:
n_pi_samples: Number of samples to draw from the policy / world model rollout every CEM iteration. Can
be zero.
uncertainty_regularizer_coeff: Coefficient for the uncertainty regularization used when estimating
- trajectory values (this is the λ coeffiecient in eqn 4 of FOWM).
+ trajectory values (this is the λ coefficient in eqn 4 of FOWM).
n_elites: The number of elite samples to use for updating the gaussian parameters every CEM iteration.
elite_weighting_temperature: The temperature to use for softmax weighting (by trajectory value) of the
elites, when updating the gaussian parameters for CEM.
@@ -102,28 +107,20 @@ class TDMPCConfig:
"""
# Input / output structure.
+ n_obs_steps: int = 1
n_action_repeats: int = 2
horizon: int = 5
n_action_steps: int = 1
- input_shapes: dict[str, list[int]] = field(
- default_factory=lambda: {
- "observation.image": [3, 84, 84],
- "observation.state": [4],
- }
- )
- output_shapes: dict[str, list[int]] = field(
+ normalization_mapping: dict[str, NormalizationMode] = field(
default_factory=lambda: {
- "action": [4],
+ "VISUAL": NormalizationMode.IDENTITY,
+ "STATE": NormalizationMode.IDENTITY,
+ "ENV": NormalizationMode.IDENTITY,
+ "ACTION": NormalizationMode.MIN_MAX,
}
)
- # Normalization / Unnormalization
- input_normalization_modes: dict[str, str] | None = None
- output_normalization_modes: dict[str, str] = field(
- default_factory=lambda: {"action": "min_max"},
- )
-
# Architecture / modeling.
# Neural networks.
image_encoder_hidden_dim: int = 32
@@ -159,32 +156,27 @@ class TDMPCConfig:
# Target model.
target_model_momentum: float = 0.995
+ # Training presets
+ optimizer_lr: float = 3e-4
+
def __post_init__(self):
+ super().__post_init__()
+
"""Input validation (not exhaustive)."""
- # There should only be one image key.
- image_keys = {k for k in self.input_shapes if k.startswith("observation.image")}
- if len(image_keys) > 1:
- raise ValueError(
- f"{self.__class__.__name__} handles at most one image for now. Got image keys {image_keys}."
- )
- if len(image_keys) > 0:
- image_key = next(iter(image_keys))
- if self.input_shapes[image_key][-2] != self.input_shapes[image_key][-1]:
- # TODO(alexander-soare): This limitation is solely because of code in the random shift
- # augmentation. It should be able to be removed.
- raise ValueError(
- f"Only square images are handled now. Got image shape {self.input_shapes[image_key]}."
- )
if self.n_gaussian_samples <= 0:
raise ValueError(
- f"The number of guassian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`"
+ f"The number of gaussian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`"
)
- if self.output_normalization_modes != {"action": "min_max"}:
+ if self.normalization_mapping["ACTION"] is not NormalizationMode.MIN_MAX:
raise ValueError(
"TD-MPC assumes the action space dimensions to all be in [-1, 1]. Therefore it is strongly "
f"advised that you stick with the default. See {self.__class__.__name__} docstring for more "
"information."
)
+ if self.n_obs_steps != 1:
+ raise ValueError(
+ f"Multiple observation steps not handled yet. Got `nobs_steps={self.n_obs_steps}`"
+ )
if self.n_action_steps > 1:
if self.n_action_repeats != 1:
raise ValueError(
@@ -194,3 +186,35 @@ def __post_init__(self):
raise ValueError("If `n_action_steps > 1`, `use_mpc` must be set to `True`.")
if self.n_action_steps > self.horizon:
raise ValueError("`n_action_steps` must be less than or equal to `horizon`.")
+
+ def get_optimizer_preset(self) -> AdamConfig:
+ return AdamConfig(lr=self.optimizer_lr)
+
+ def get_scheduler_preset(self) -> None:
+ return None
+
+ def validate_features(self) -> None:
+ # There should only be one image key.
+ if len(self.image_features) > 1:
+ raise ValueError(
+ f"{self.__class__.__name__} handles at most one image for now. Got image keys {self.image_features}."
+ )
+
+ if len(self.image_features) > 0:
+ image_ft = next(iter(self.image_features.values()))
+ if image_ft.shape[-2] != image_ft.shape[-1]:
+ # TODO(alexander-soare): This limitation is solely because of code in the random shift
+ # augmentation. It should be able to be removed.
+ raise ValueError(f"Only square images are handled now. Got image shape {image_ft.shape}.")
+
+ @property
+ def observation_delta_indices(self) -> list:
+ return list(range(self.horizon + 1))
+
+ @property
+ def action_delta_indices(self) -> list:
+ return list(range(self.horizon))
+
+ @property
+ def reward_delta_indices(self) -> None:
+ return list(range(self.horizon))
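+ # Example with the default horizon=5: observations are loaded for relative steps
+ # [0, 1, ..., 5] while actions and rewards cover [0, 1, ..., 4], i.e. one extra
+ # observation for the next-step targets.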
diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/src/lerobot/policies/tdmpc/modeling_tdmpc.py
similarity index 84%
rename from lerobot/common/policies/tdmpc/modeling_tdmpc.py
rename to src/lerobot/policies/tdmpc/modeling_tdmpc.py
index d97c4824c4..664fe863dd 100644
--- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py
+++ b/src/lerobot/policies/tdmpc/modeling_tdmpc.py
@@ -17,37 +17,32 @@
"""Implementation of Finetuning Offline World Models in the Real World.
The comments in this code may sometimes refer to these references:
- TD-MPC paper: Temporal Difference Learning for Model Predictive Control (https://arxiv.org/abs/2203.04955)
- FOWM paper: Finetuning Offline World Models in the Real World (https://arxiv.org/abs/2310.16029)
+ TD-MPC paper: Temporal Difference Learning for Model Predictive Control (https://huggingface.co/papers/2203.04955)
+ FOWM paper: Finetuning Offline World Models in the Real World (https://huggingface.co/papers/2310.16029)
"""
# ruff: noqa: N806
from collections import deque
+from collections.abc import Callable
from copy import deepcopy
from functools import partial
-from typing import Callable
import einops
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F # noqa: N812
-from huggingface_hub import PyTorchModelHubMixin
from torch import Tensor
-from lerobot.common.policies.normalize import Normalize, Unnormalize
-from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig
-from lerobot.common.policies.utils import get_device_from_parameters, populate_queues
+from lerobot.constants import ACTION, OBS_ENV_STATE, OBS_IMAGE, OBS_STATE, REWARD
+from lerobot.policies.normalize import Normalize, Unnormalize
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
+from lerobot.policies.utils import get_device_from_parameters, get_output_shape, populate_queues
-class TDMPCPolicy(
- nn.Module,
- PyTorchModelHubMixin,
- library_name="lerobot",
- repo_url="https://github.com/huggingface/lerobot",
- tags=["robotics", "tdmpc"],
-):
+class TDMPCPolicy(PreTrainedPolicy):
"""Implementation of TD-MPC learning + inference.
Please note several warnings for this policy.
@@ -65,11 +60,10 @@ class TDMPCPolicy(
match our xarm environment.
"""
+ config_class = TDMPCConfig
name = "tdmpc"
- def __init__(
- self, config: TDMPCConfig | None = None, dataset_stats: dict[str, dict[str, Tensor]] | None = None
- ):
+ def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None):
"""
Args:
config: Policy configuration class instance or None, in which case the default instantiation of
@@ -77,42 +71,28 @@ def __init__(
dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
that they will be passed with a call to `load_state_dict` before the policy is used.
"""
- super().__init__()
-
- if config is None:
- config = TDMPCConfig()
+ super().__init__(config)
+ config.validate_features()
self.config = config
- self.model = TDMPCTOLD(config)
- self.model_target = deepcopy(self.model)
- for param in self.model_target.parameters():
- param.requires_grad = False
- if config.input_normalization_modes is not None:
- self.normalize_inputs = Normalize(
- config.input_shapes, config.input_normalization_modes, dataset_stats
- )
- else:
- self.normalize_inputs = nn.Identity()
+ self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
self.normalize_targets = Normalize(
- config.output_shapes, config.output_normalization_modes, dataset_stats
+ config.output_features, config.normalization_mapping, dataset_stats
)
self.unnormalize_outputs = Unnormalize(
- config.output_shapes, config.output_normalization_modes, dataset_stats
+ config.output_features, config.normalization_mapping, dataset_stats
)
- image_keys = [k for k in config.input_shapes if k.startswith("observation.image")]
- # Note: This check is covered in the post-init of the config but have a sanity check just in case.
- self._use_image = False
- self._use_env_state = False
- if len(image_keys) > 0:
- assert len(image_keys) == 1
- self._use_image = True
- self.input_image_key = image_keys[0]
- if "observation.environment_state" in config.input_shapes:
- self._use_env_state = True
+ self.model = TDMPCTOLD(config)
+ self.model_target = deepcopy(self.model)
+ for param in self.model_target.parameters():
+ param.requires_grad = False
self.reset()
+ def get_optim_params(self) -> dict:
+ return self.parameters()
+
def reset(self):
"""
Clear observation and action queues. Clear previous means for warm starting of MPPI/CEM. Should be
@@ -122,60 +102,66 @@ def reset(self):
"observation.state": deque(maxlen=1),
"action": deque(maxlen=max(self.config.n_action_steps, self.config.n_action_repeats)),
}
- if self._use_image:
+ if self.config.image_features:
self._queues["observation.image"] = deque(maxlen=1)
- if self._use_env_state:
+ if self.config.env_state_feature:
self._queues["observation.environment_state"] = deque(maxlen=1)
# Previous mean obtained from the cross-entropy method (CEM) used during MPC. It is used to warm start
# CEM for the next step.
self._prev_mean: torch.Tensor | None = None
+ @torch.no_grad()
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ """Predict a chunk of actions given environment observations."""
+ batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch if key in self._queues}
+
+ # Remove the time dimensions as it is not handled yet.
+ for key in batch:
+ assert batch[key].shape[1] == 1
+ batch[key] = batch[key][:, 0]
+
+ # NOTE: Order of observations matters here.
+ encode_keys = []
+ if self.config.image_features:
+ encode_keys.append(OBS_IMAGE)
+ if self.config.env_state_feature:
+ encode_keys.append(OBS_ENV_STATE)
+ encode_keys.append(OBS_STATE)
+ z = self.model.encode({k: batch[k] for k in encode_keys})
+ if self.config.use_mpc: # noqa: SIM108
+ actions = self.plan(z) # (horizon, batch, action_dim)
+ else:
+ # Plan with the policy (π) alone. This always returns one action so unsqueeze to get a
+ # sequence dimension like in the MPC branch.
+ actions = self.model.pi(z).unsqueeze(0)
+
+ actions = torch.clamp(actions, -1, +1)
+
+ actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
+ return actions
+
@torch.no_grad()
def select_action(self, batch: dict[str, Tensor]) -> Tensor:
"""Select a single action given environment observations."""
batch = self.normalize_inputs(batch)
- if self._use_image:
+ if self.config.image_features:
batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
- batch["observation.image"] = batch[self.input_image_key]
+ batch[OBS_IMAGE] = batch[next(iter(self.config.image_features))]
self._queues = populate_queues(self._queues, batch)
# When the action queue is depleted, populate it again by querying the policy.
- if len(self._queues["action"]) == 0:
- batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch}
-
- # Remove the time dimensions as it is not handled yet.
- for key in batch:
- assert batch[key].shape[1] == 1
- batch[key] = batch[key][:, 0]
-
- # NOTE: Order of observations matters here.
- encode_keys = []
- if self._use_image:
- encode_keys.append("observation.image")
- if self._use_env_state:
- encode_keys.append("observation.environment_state")
- encode_keys.append("observation.state")
- z = self.model.encode({k: batch[k] for k in encode_keys})
- if self.config.use_mpc: # noqa: SIM108
- actions = self.plan(z) # (horizon, batch, action_dim)
- else:
- # Plan with the policy (π) alone. This always returns one action so unsqueeze to get a
- # sequence dimension like in the MPC branch.
- actions = self.model.pi(z).unsqueeze(0)
-
- actions = torch.clamp(actions, -1, +1)
-
- actions = self.unnormalize_outputs({"action": actions})["action"]
+ if len(self._queues[ACTION]) == 0:
+ actions = self.predict_action_chunk(batch)
if self.config.n_action_repeats > 1:
for _ in range(self.config.n_action_repeats):
- self._queues["action"].append(actions[0])
+ self._queues[ACTION].append(actions[0])
else:
# Action queue is (n_action_steps, batch_size, action_dim), so we transpose the action.
- self._queues["action"].extend(actions[: self.config.n_action_steps])
+ self._queues[ACTION].extend(actions[: self.config.n_action_steps])
- action = self._queues["action"].popleft()
+ action = self._queues[ACTION].popleft()
return action
@torch.no_grad()
@@ -196,7 +182,7 @@ def plan(self, z: Tensor) -> Tensor:
self.config.horizon,
self.config.n_pi_samples,
batch_size,
- self.config.output_shapes["action"][0],
+ self.config.action_feature.shape[0],
device=device,
)
if self.config.n_pi_samples > 0:
@@ -215,7 +201,7 @@ def plan(self, z: Tensor) -> Tensor:
# algorithm.
# The initial mean and standard deviation for the cross-entropy method (CEM).
mean = torch.zeros(
- self.config.horizon, batch_size, self.config.output_shapes["action"][0], device=device
+ self.config.horizon, batch_size, self.config.action_feature.shape[0], device=device
)
# Maybe warm start CEM with the mean from the previous step.
if self._prev_mean is not None:
@@ -228,7 +214,7 @@ def plan(self, z: Tensor) -> Tensor:
self.config.horizon,
self.config.n_gaussian_samples,
batch_size,
- self.config.output_shapes["action"][0],
+ self.config.action_feature.shape[0],
device=std.device,
)
gaussian_actions = torch.clamp(mean.unsqueeze(1) + std.unsqueeze(1) * std_normal_noise, -1, 1)
@@ -322,7 +308,7 @@ def estimate_value(self, z: Tensor, actions: Tensor):
G -= running_discount * self.config.uncertainty_regularizer_coeff * terminal_values.std(0)
return G
- def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]:
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]:
"""Run the batch through the model and compute the loss.
Returns a dictionary with loss as a tensor, and other information as native floats.
@@ -330,27 +316,27 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]:
device = get_device_from_parameters(self)
batch = self.normalize_inputs(batch)
- if self._use_image:
+ if self.config.image_features:
batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
- batch["observation.image"] = batch[self.input_image_key]
+ batch[OBS_IMAGE] = batch[next(iter(self.config.image_features))]
batch = self.normalize_targets(batch)
info = {}
# (b, t) -> (t, b)
for key in batch:
- if batch[key].ndim > 1:
+ if isinstance(batch[key], torch.Tensor) and batch[key].ndim > 1:
batch[key] = batch[key].transpose(1, 0)
- action = batch["action"] # (t, b, action_dim)
- reward = batch["next.reward"] # (t, b)
+ action = batch[ACTION] # (t, b, action_dim)
+ reward = batch[REWARD] # (t, b)
observations = {k: v for k, v in batch.items() if k.startswith("observation.")}
# Apply random image augmentations.
- if self._use_image and self.config.max_random_shift_ratio > 0:
- observations["observation.image"] = flatten_forward_unflatten(
+ if self.config.image_features and self.config.max_random_shift_ratio > 0:
+ observations[OBS_IMAGE] = flatten_forward_unflatten(
partial(random_shifts_aug, max_random_shift_ratio=self.config.max_random_shift_ratio),
- observations["observation.image"],
+ observations[OBS_IMAGE],
)
# Get the current observation for predicting trajectories, and all future observations for use in
@@ -360,7 +346,7 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]:
current_observation[k] = observations[k][0]
next_observations[k] = observations[k][1:]
horizon, batch_size = next_observations[
- "observation.image" if self._use_image else "observation.environment_state"
+ OBS_IMAGE if self.config.image_features else OBS_ENV_STATE
].shape[:2]
# Run latent rollout using the latent dynamics model and policy model.
@@ -515,17 +501,16 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]:
"Q_value_loss": q_value_loss.item(),
"V_value_loss": v_value_loss.item(),
"pi_loss": pi_loss.item(),
- "loss": loss,
"sum_loss": loss.item() * self.config.horizon,
}
)
# Undo (b, t) -> (t, b).
for key in batch:
- if batch[key].ndim > 1:
+ if isinstance(batch[key], torch.Tensor) and batch[key].ndim > 1:
batch[key] = batch[key].transpose(1, 0)
- return info
+ return loss, info
def update(self):
"""Update the target model's parameters with an EMA step."""
@@ -543,7 +528,7 @@ def __init__(self, config: TDMPCConfig):
self.config = config
self._encoder = TDMPCObservationEncoder(config)
self._dynamics = nn.Sequential(
- nn.Linear(config.latent_dim + config.output_shapes["action"][0], config.mlp_dim),
+ nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim),
nn.LayerNorm(config.mlp_dim),
nn.Mish(),
nn.Linear(config.mlp_dim, config.mlp_dim),
@@ -554,7 +539,7 @@ def __init__(self, config: TDMPCConfig):
nn.Sigmoid(),
)
self._reward = nn.Sequential(
- nn.Linear(config.latent_dim + config.output_shapes["action"][0], config.mlp_dim),
+ nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim),
nn.LayerNorm(config.mlp_dim),
nn.Mish(),
nn.Linear(config.mlp_dim, config.mlp_dim),
@@ -569,12 +554,12 @@ def __init__(self, config: TDMPCConfig):
nn.Linear(config.mlp_dim, config.mlp_dim),
nn.LayerNorm(config.mlp_dim),
nn.Mish(),
- nn.Linear(config.mlp_dim, config.output_shapes["action"][0]),
+ nn.Linear(config.mlp_dim, config.action_feature.shape[0]),
)
self._Qs = nn.ModuleList(
[
nn.Sequential(
- nn.Linear(config.latent_dim + config.output_shapes["action"][0], config.mlp_dim),
+ nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim),
nn.LayerNorm(config.mlp_dim),
nn.Tanh(),
nn.Linear(config.mlp_dim, config.mlp_dim),
@@ -615,9 +600,9 @@ def _apply_fn(m):
self.apply(_apply_fn)
for m in [self._reward, *self._Qs]:
- assert isinstance(
- m[-1], nn.Linear
- ), "Sanity check. The last linear layer needs 0 initialization on weights."
+ assert isinstance(m[-1], nn.Linear), (
+ "Sanity check. The last linear layer needs 0 initialization on weights."
+ )
nn.init.zeros_(m[-1].weight)
nn.init.zeros_(m[-1].bias) # this has already been done, but keep this line here for good measure
@@ -714,10 +699,13 @@ def __init__(self, config: TDMPCConfig):
super().__init__()
self.config = config
- if "observation.image" in config.input_shapes:
+ if config.image_features:
self.image_enc_layers = nn.Sequential(
nn.Conv2d(
- config.input_shapes["observation.image"][0], config.image_encoder_hidden_dim, 7, stride=2
+ next(iter(config.image_features.values())).shape[0],
+ config.image_encoder_hidden_dim,
+ 7,
+ stride=2,
),
nn.ReLU(),
nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 5, stride=2),
@@ -727,9 +715,8 @@ def __init__(self, config: TDMPCConfig):
nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 3, stride=2),
nn.ReLU(),
)
- dummy_batch = torch.zeros(1, *config.input_shapes["observation.image"])
- with torch.inference_mode():
- out_shape = self.image_enc_layers(dummy_batch).shape[1:]
+ dummy_shape = (1, *next(iter(config.image_features.values())).shape)
+ out_shape = get_output_shape(self.image_enc_layers, dummy_shape)[1:]
self.image_enc_layers.extend(
nn.Sequential(
nn.Flatten(),
@@ -738,19 +725,19 @@ def __init__(self, config: TDMPCConfig):
nn.Sigmoid(),
)
)
- if "observation.state" in config.input_shapes:
+
+ if config.robot_state_feature:
self.state_enc_layers = nn.Sequential(
- nn.Linear(config.input_shapes["observation.state"][0], config.state_encoder_hidden_dim),
+ nn.Linear(config.robot_state_feature.shape[0], config.state_encoder_hidden_dim),
nn.ELU(),
nn.Linear(config.state_encoder_hidden_dim, config.latent_dim),
nn.LayerNorm(config.latent_dim),
nn.Sigmoid(),
)
- if "observation.environment_state" in config.input_shapes:
+
+ if config.env_state_feature:
self.env_state_enc_layers = nn.Sequential(
- nn.Linear(
- config.input_shapes["observation.environment_state"][0], config.state_encoder_hidden_dim
- ),
+ nn.Linear(config.env_state_feature.shape[0], config.state_encoder_hidden_dim),
nn.ELU(),
nn.Linear(config.state_encoder_hidden_dim, config.latent_dim),
nn.LayerNorm(config.latent_dim),
@@ -765,12 +752,16 @@ def forward(self, obs_dict: dict[str, Tensor]) -> Tensor:
"""
feat = []
# NOTE: Order of observations matters here.
- if "observation.image" in self.config.input_shapes:
- feat.append(flatten_forward_unflatten(self.image_enc_layers, obs_dict["observation.image"]))
- if "observation.environment_state" in self.config.input_shapes:
- feat.append(self.env_state_enc_layers(obs_dict["observation.environment_state"]))
- if "observation.state" in self.config.input_shapes:
- feat.append(self.state_enc_layers(obs_dict["observation.state"]))
+ if self.config.image_features:
+ feat.append(
+ flatten_forward_unflatten(
+ self.image_enc_layers, obs_dict[next(iter(self.config.image_features))]
+ )
+ )
+ if self.config.env_state_feature:
+ feat.append(self.env_state_enc_layers(obs_dict[OBS_ENV_STATE]))
+ if self.config.robot_state_feature:
+ feat.append(self.state_enc_layers(obs_dict[OBS_STATE]))
return torch.stack(feat, dim=0).mean(0)
diff --git a/src/lerobot/policies/utils.py b/src/lerobot/policies/utils.py
new file mode 100644
index 0000000000..5659e8727a
--- /dev/null
+++ b/src/lerobot/policies/utils.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import deque
+
+import torch
+from torch import nn
+
+
+def populate_queues(
+ queues: dict[str, deque], batch: dict[str, torch.Tensor], exclude_keys: list[str] | None = None
+):
+ if exclude_keys is None:
+ exclude_keys = []
+ for key in batch:
+ # Ignore keys not in the queues already (leaving the responsibility to the caller to make sure the
+ # queues have the keys they want).
+ if key not in queues or key in exclude_keys:
+ continue
+ if len(queues[key]) != queues[key].maxlen:
+ # initialize by copying the first observation several times until the queue is full
+ while len(queues[key]) != queues[key].maxlen:
+ queues[key].append(batch[key])
+ else:
+ # add latest observation to the queue
+ queues[key].append(batch[key])
+ return queues
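+ # Minimal usage sketch (hypothetical key/shape):
+ # queues = {"observation.state": deque(maxlen=2)}
+ # queues = populate_queues(queues, {"observation.state": torch.zeros(1, 4)})
+ # The first call fills the queue by repeating the observation; later calls append
+ # the newest observation, letting the deque drop the oldest one.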
+
+
+def get_device_from_parameters(module: nn.Module) -> torch.device:
+ """Get a module's device by checking one of its parameters.
+
+ Note: assumes that all parameters have the same device
+ """
+ return next(iter(module.parameters())).device
+
+
+def get_dtype_from_parameters(module: nn.Module) -> torch.dtype:
+ """Get a module's parameter dtype by checking one of its parameters.
+
+ Note: assumes that all parameters have the same dtype.
+ """
+ return next(iter(module.parameters())).dtype
+
+
+def get_output_shape(module: nn.Module, input_shape: tuple) -> tuple:
+ """
+ Calculates the output shape of a PyTorch module given an input shape.
+
+ Args:
+ module (nn.Module): a PyTorch module
+ input_shape (tuple): A tuple representing the input shape, e.g., (batch_size, channels, height, width)
+
+ Returns:
+ tuple: The output shape of the module.
+ """
+ dummy_input = torch.zeros(size=input_shape)
+ with torch.inference_mode():
+ output = module(dummy_input)
+ return tuple(output.shape)
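+ # Usage sketch: get_output_shape(nn.Conv2d(3, 8, kernel_size=3), (1, 3, 84, 84))
+ # returns (1, 8, 82, 82), computed under torch.inference_mode() with a zero dummy input.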
diff --git a/src/lerobot/policies/vqbet/configuration_vqbet.py b/src/lerobot/policies/vqbet/configuration_vqbet.py
new file mode 100644
index 0000000000..d7a79f1891
--- /dev/null
+++ b/src/lerobot/policies/vqbet/configuration_vqbet.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Seungjae Lee and Yibin Wang and Haritheja Etukuru
+# and H. Jin Kim and Nur Muhammad Mahi Shafiullah and Lerrel Pinto
+# and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamConfig
+from lerobot.optim.schedulers import VQBeTSchedulerConfig
+
+
+@PreTrainedConfig.register_subclass("vqbet")
+@dataclass
+class VQBeTConfig(PreTrainedConfig):
+ """Configuration class for VQ-BeT.
+
+ Defaults are configured for training with PushT providing proprioceptive and single camera observations.
+
+ The parameters you will most likely need to change are the ones which depend on the environment / sensors.
+ Those are: `input_shapes` and `output_shapes`.
+
+ Notes on the inputs and outputs:
+ - "observation.state" is required as an input key.
+ - At least one key starting with "observation.image" is required as an input.
+ - If there are multiple keys beginning with "observation.image" they are treated as multiple camera
+ views. Right now we only support all images having the same shape.
+ - "action" is required as an output key.
+
+ Args:
+ n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
+ current step and additional steps going back).
+ n_action_pred_token: Total number of tokens (the current one plus future ones) that VQ-BeT predicts.
+ action_chunk_size: Action chunk size of each action prediction token.
+ input_shapes: A dictionary defining the shapes of the input data for the policy.
+ The key represents the input data name, and the value is a list indicating the dimensions
+ of the corresponding data. For example, "observation.image" refers to an input from
+ a camera with dimensions [3, 96, 96], indicating it has three color channels and 96x96 resolution.
+ Importantly, shapes don't include batch dimension or temporal dimension.
+ output_shapes: A dictionary defining the shapes of the output data for the policy.
+ The key represents the output data name, and the value is a list indicating the dimensions
+ of the corresponding data. For example, "action" refers to an output shape of [14], indicating
+ 14-dimensional actions. Importantly, shapes don't include batch dimension or temporal dimension.
+ input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
+ and the value specifies the normalization mode to apply. The two available modes are "mean_std"
+ which subtracts the mean and divides by the standard deviation, and "min_max" which rescales to a
+ [-1, 1] range.
+ output_normalization_modes: Similar dictionary as `normalize_input_modes`, but to unnormalize to the
+ original scale. Note that this is also used for normalizing the training targets.
+ vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
+ crop_shape: (H, W) shape to crop images to as a preprocessing step for the vision backbone. Must fit
+ within the image size. If None, no cropping is done.
+ crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
+ mode).
+ pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
+ `None` means no pretrained weights.
+ use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
+ The group sizes are set to be about 16 (to be precise, feature_dim // 16).
+ spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax.
+ n_vqvae_training_steps: Number of optimization steps for training Residual VQ.
+ vqvae_n_embed: Number of embedding vectors in the RVQ dictionary (each layer).
+ vqvae_embedding_dim: Dimension of each embedding vector in the RVQ dictionary.
+ vqvae_enc_hidden_dim: Size of hidden dimensions of the encoder / decoder parts of the Residual VQ-VAE.
+ gpt_block_size: Max block size of minGPT (should be larger than the number of input tokens)
+ gpt_input_dim: Size of the input dimension of GPT. This is also used as the dimension of observation features.
+ gpt_output_dim: Size of the output dimension of GPT. This is also used as the input dimension of the offset / bin prediction heads.
+ gpt_n_layer: Number of layers of GPT
+ gpt_n_head: Number of attention heads of GPT
+ gpt_hidden_dim: Size of hidden dimensions of GPT
+ dropout: Dropout rate for GPT
+ mlp_hidden_dim: Size of hidden dimensions of the offset / bin prediction heads of VQ-BeT
+ offset_loss_weight: A constant multiplier applied to the offset loss
+ primary_code_loss_weight: A constant multiplier applied to the primary code prediction loss
+ secondary_code_loss_weight: A constant multiplier applied to the secondary code prediction loss
+ bet_softmax_temperature: Sampling temperature of code for rollout with VQ-BeT
+ sequentially_select: Whether to select the primary / secondary codes sequentially (pick the primary
+ code, then select the secondary code), or at the same time.
+ """
+
+ # Inputs / output structure.
+ n_obs_steps: int = 5
+ n_action_pred_token: int = 3
+ action_chunk_size: int = 5
+
+ normalization_mapping: dict[str, NormalizationMode] = field(
+ default_factory=lambda: {
+ "VISUAL": NormalizationMode.IDENTITY,
+ "STATE": NormalizationMode.MIN_MAX,
+ "ACTION": NormalizationMode.MIN_MAX,
+ }
+ )
+
+ # Architecture / modeling.
+ # Vision backbone.
+ vision_backbone: str = "resnet18"
+ crop_shape: tuple[int, int] | None = (84, 84)
+ crop_is_random: bool = True
+ pretrained_backbone_weights: str | None = None
+ use_group_norm: bool = True
+ spatial_softmax_num_keypoints: int = 32
+ # VQ-VAE
+ n_vqvae_training_steps: int = 20000
+ vqvae_n_embed: int = 16
+ vqvae_embedding_dim: int = 256
+ vqvae_enc_hidden_dim: int = 128
+ # VQ-BeT
+ gpt_block_size: int = 500
+ gpt_input_dim: int = 512
+ gpt_output_dim: int = 512
+ gpt_n_layer: int = 8
+ gpt_n_head: int = 8
+ gpt_hidden_dim: int = 512
+ dropout: float = 0.1
+ mlp_hidden_dim: int = 1024
+ offset_loss_weight: float = 10000.0
+ primary_code_loss_weight: float = 5.0
+ secondary_code_loss_weight: float = 0.5
+ bet_softmax_temperature: float = 0.1
+ sequentially_select: bool = False
+
+ # Training presets
+ optimizer_lr: float = 1e-4
+ optimizer_betas: tuple = (0.95, 0.999)
+ optimizer_eps: float = 1e-8
+ optimizer_weight_decay: float = 1e-6
+ optimizer_vqvae_lr: float = 1e-3
+ optimizer_vqvae_weight_decay: float = 1e-4
+ scheduler_warmup_steps: int = 500
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ """Input validation (not exhaustive)."""
+ if not self.vision_backbone.startswith("resnet"):
+ raise ValueError(
+ f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
+ )
+
+ def get_optimizer_preset(self) -> AdamConfig:
+ return AdamConfig(
+ lr=self.optimizer_lr,
+ betas=self.optimizer_betas,
+ eps=self.optimizer_eps,
+ weight_decay=self.optimizer_weight_decay,
+ )
+
+ def get_scheduler_preset(self) -> VQBeTSchedulerConfig:
+ return VQBeTSchedulerConfig(
+ num_warmup_steps=self.scheduler_warmup_steps,
+ num_vqvae_training_steps=self.n_vqvae_training_steps,
+ )
+
+ def validate_features(self) -> None:
+ # Note: this check was previously performed inside VQBeTRgbEncoder in the form of
+ # assert len(image_keys) == 1
+ if not len(self.image_features) == 1:
+ raise ValueError("You must provide only one image among the inputs.")
+
+ if self.crop_shape is not None:
+ for key, image_ft in self.image_features.items():
+ if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]:
+ raise ValueError(
+ f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} "
+ f"for `crop_shape` and {image_ft.shape} for "
+ f"`{key}`."
+ )
+
+ # Check that all input images have the same shape.
+ first_image_key, first_image_ft = next(iter(self.image_features.items()))
+ for key, image_ft in self.image_features.items():
+ if image_ft.shape != first_image_ft.shape:
+ raise ValueError(
+ f"`{key}` does not match `{first_image_key}`, but we expect all image shapes to match."
+ )
+
+ @property
+ def observation_delta_indices(self) -> list:
+ return list(range(1 - self.n_obs_steps, 1))
+
+ @property
+ def action_delta_indices(self) -> list:
+ return list(range(1 - self.n_obs_steps, self.n_action_pred_token + self.action_chunk_size - 1))
+
+ @property
+ def reward_delta_indices(self) -> None:
+ return None
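As a quick worked example of the two properties above: with the default values (`n_obs_steps=5`, `n_action_pred_token=3`, `action_chunk_size=5`), the delta indices resolve to the frame offsets below (a standalone sketch mirroring the property definitions).

```python
n_obs_steps, n_action_pred_token, action_chunk_size = 5, 3, 5

observation_delta_indices = list(range(1 - n_obs_steps, 1))
action_delta_indices = list(range(1 - n_obs_steps, n_action_pred_token + action_chunk_size - 1))

print(observation_delta_indices)  # [-4, -3, -2, -1, 0] -> the 5 most recent observations
print(action_delta_indices)       # [-4, ..., 6] -> 11 action offsets, past and future
```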
diff --git a/lerobot/common/policies/vqbet/modeling_vqbet.py b/src/lerobot/policies/vqbet/modeling_vqbet.py
similarity index 85%
rename from lerobot/common/policies/vqbet/modeling_vqbet.py
rename to src/lerobot/policies/vqbet/modeling_vqbet.py
index 98adce00b1..b271298a3f 100644
--- a/lerobot/common/policies/vqbet/modeling_vqbet.py
+++ b/src/lerobot/policies/vqbet/modeling_vqbet.py
@@ -16,39 +16,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import math
import warnings
from collections import deque
-from typing import Callable, List
+from collections.abc import Callable
import einops
import numpy as np
import torch
import torch.nn.functional as F # noqa: N812
import torchvision
-from huggingface_hub import PyTorchModelHubMixin
from torch import Tensor, nn
-from torch.optim.lr_scheduler import LambdaLR
-from lerobot.common.policies.normalize import Normalize, Unnormalize
-from lerobot.common.policies.utils import get_device_from_parameters, populate_queues
-from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig
-from lerobot.common.policies.vqbet.vqbet_utils import GPT, ResidualVQ
+from lerobot.constants import ACTION, OBS_IMAGES, OBS_STATE
+from lerobot.policies.normalize import Normalize, Unnormalize
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.utils import get_device_from_parameters, get_output_shape, populate_queues
+from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
+from lerobot.policies.vqbet.vqbet_utils import GPT, ResidualVQ
# ruff: noqa: N806
-class VQBeTPolicy(
- nn.Module,
- PyTorchModelHubMixin,
- library_name="lerobot",
- repo_url="https://github.com/huggingface/lerobot",
- tags=["robotics", "vqbet"],
-):
+class VQBeTPolicy(PreTrainedPolicy):
"""
VQ-BeT Policy as per "Behavior Generation with Latent Actions"
"""
+ config_class = VQBeTConfig
name = "vqbet"
def __init__(
@@ -63,38 +57,81 @@ def __init__(
dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
that they will be passed with a call to `load_state_dict` before the policy is used.
"""
- super().__init__()
- if config is None:
- config = VQBeTConfig()
+ super().__init__(config)
+ config.validate_features()
self.config = config
- self.normalize_inputs = Normalize(
- config.input_shapes, config.input_normalization_modes, dataset_stats
- )
+
+ self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
self.normalize_targets = Normalize(
- config.output_shapes, config.output_normalization_modes, dataset_stats
+ config.output_features, config.normalization_mapping, dataset_stats
)
self.unnormalize_outputs = Unnormalize(
- config.output_shapes, config.output_normalization_modes, dataset_stats
+ config.output_features, config.normalization_mapping, dataset_stats
)
self.vqbet = VQBeTModel(config)
- self.expected_image_keys = [k for k in config.input_shapes if k.startswith("observation.image")]
-
self.reset()
+ def get_optim_params(self) -> dict:
+ vqvae_params = (
+ list(self.vqbet.action_head.vqvae_model.encoder.parameters())
+ + list(self.vqbet.action_head.vqvae_model.decoder.parameters())
+ + list(self.vqbet.action_head.vqvae_model.vq_layer.parameters())
+ )
+ decay_params, no_decay_params = self.vqbet.policy.configure_parameters()
+ decay_params = (
+ decay_params
+ + list(self.vqbet.rgb_encoder.parameters())
+ + list(self.vqbet.state_projector.parameters())
+ + list(self.vqbet.rgb_feature_projector.parameters())
+ + [self.vqbet.action_token]
+ + list(self.vqbet.action_head.map_to_cbet_preds_offset.parameters())
+ )
+
+ if self.config.sequentially_select:
+ decay_params = (
+ decay_params
+ + list(self.vqbet.action_head.map_to_cbet_preds_primary_bin.parameters())
+ + list(self.vqbet.action_head.map_to_cbet_preds_secondary_bin.parameters())
+ )
+ else:
+ decay_params = decay_params + list(self.vqbet.action_head.map_to_cbet_preds_bin.parameters())
+
+ return [
+ {
+ "params": decay_params,
+ },
+ {
+ "params": vqvae_params,
+ "weight_decay": self.config.optimizer_vqvae_weight_decay,
+ "lr": self.config.optimizer_vqvae_lr,
+ },
+ {
+ "params": no_decay_params,
+ "weight_decay": 0.0,
+ },
+ ]
+
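For intuition, the three parameter groups returned above are meant to be consumed by a single Adam optimizer; groups that do not set `lr` or `weight_decay` inherit the optimizer-level defaults from `get_optimizer_preset()`. A hedged sketch of how a training script might wire this up, assuming `policy` is an already-instantiated `VQBeTPolicy`:

```python
import torch

cfg = policy.config  # assumes `policy` is a VQBeTPolicy instance
optimizer = torch.optim.Adam(
    policy.get_optim_params(),                # decay / VQ-VAE / no-decay groups
    lr=cfg.optimizer_lr,                      # default lr for groups without their own
    betas=cfg.optimizer_betas,
    eps=cfg.optimizer_eps,
    weight_decay=cfg.optimizer_weight_decay,  # default weight decay
)
```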
def reset(self):
"""
Clear observation and action queues. Should be called on `env.reset()`.
Queues are populated during rollout of the policy; they contain the n latest observations and actions.
"""
self._queues = {
- "observation.images": deque(maxlen=self.config.n_obs_steps),
- "observation.state": deque(maxlen=self.config.n_obs_steps),
- "action": deque(maxlen=self.config.action_chunk_size),
+ OBS_IMAGES: deque(maxlen=self.config.n_obs_steps),
+ OBS_STATE: deque(maxlen=self.config.n_obs_steps),
+ ACTION: deque(maxlen=self.config.action_chunk_size),
}
- @torch.no_grad
+ @torch.no_grad()
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+ batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
+ actions = self.vqbet(batch, rollout=True)[:, : self.config.action_chunk_size]
+ actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
+ return actions
+
+ @torch.no_grad()
def select_action(self, batch: dict[str, Tensor]) -> Tensor:
"""Select a single action given environment observations.
@@ -105,7 +142,7 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
batch = self.normalize_inputs(batch)
batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
- batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4)
+ batch["observation.images"] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
# Note: It's important that this happens after stacking the images into a single key.
self._queues = populate_queues(self._queues, batch)
@@ -115,48 +152,44 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
stacklevel=1,
)
- if len(self._queues["action"]) == 0:
- batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
- actions = self.vqbet(batch, rollout=True)[:, : self.config.action_chunk_size]
-
- # the dimension of returned action is (batch_size, action_chunk_size, action_dim)
- actions = self.unnormalize_outputs({"action": actions})["action"]
+ if len(self._queues[ACTION]) == 0:
+ actions = self.predict_action_chunk(batch)
# since the data in the action queue's dimension is (action_chunk_size, batch_size, action_dim), we transpose the action and fill the queue
- self._queues["action"].extend(actions.transpose(0, 1))
+ self._queues[ACTION].extend(actions.transpose(0, 1))
- action = self._queues["action"].popleft()
+ action = self._queues[ACTION].popleft()
return action
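The queue logic above amortizes one chunk prediction over several environment steps: the first call fills the queue with `action_chunk_size` actions, and later calls simply pop from it until it is empty. A standalone sketch of that control flow with a stubbed predictor (names and shapes are illustrative only):

```python
from collections import deque

import torch

action_chunk_size = 5
queue: deque[torch.Tensor] = deque(maxlen=action_chunk_size)

def predict_action_chunk_stub() -> torch.Tensor:
    # Stand-in for predict_action_chunk(batch): (batch, chunk, action_dim).
    return torch.zeros(1, action_chunk_size, 2)

def select_action_stub() -> torch.Tensor:
    if len(queue) == 0:
        actions = predict_action_chunk_stub()
        # The queue stores (chunk, batch, action_dim), hence the transpose.
        queue.extend(actions.transpose(0, 1))
    return queue.popleft()

for step in range(7):
    action = select_action_stub()  # the model only runs at steps 0 and 5
```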
- def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]:
"""Run the batch through the model and compute the loss for training or validation."""
batch = self.normalize_inputs(batch)
batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
- batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4)
+ batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
batch = self.normalize_targets(batch)
- # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://arxiv.org/pdf/2403.03181)
+ # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://huggingface.co/papers/2403.03181)
if not self.vqbet.action_head.vqvae_model.discretized.item():
# loss: total loss of training RVQ
# n_different_codes: how many of the total possible VQ codes are being used in single batch (how many of them have at least one encoder embedding as a nearest neighbor). This can be at most `vqvae_n_embed * number of layers of RVQ (=2)`.
# n_different_combinations: how many different code combinations are being used out of all possible combinations in single batch. This can be at most `vqvae_n_embed ^ number of layers of RVQ (=2)` (hint consider the RVQ as a decision tree).
loss, n_different_codes, n_different_combinations, recon_l1_error = (
- self.vqbet.action_head.discretize(self.config.n_vqvae_training_steps, batch["action"])
+ self.vqbet.action_head.discretize(self.config.n_vqvae_training_steps, batch[ACTION])
)
- return {
- "loss": loss,
+ return loss, {
"n_different_codes": n_different_codes,
"n_different_combinations": n_different_combinations,
"recon_l1_error": recon_l1_error,
}
# if Residual VQ is already trained, VQ-BeT trains its GPT and bin prediction head / offset prediction head parts.
_, loss_dict = self.vqbet(batch, rollout=False)
+ loss = loss_dict.pop("loss")
- return loss_dict
+ return loss, loss_dict
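Because `forward` switches on `vqvae_model.discretized`, the same training loop covers both phases: Residual VQ pre-training for the first `n_vqvae_training_steps`, then BeT training of the GPT and prediction heads. A rough sketch of such a loop, assuming `policy`, `optimizer`, and a `dataloader` yielding batches with the expected keys:

```python
# Sketch only: the phase switch happens inside policy.forward().
for step, batch in enumerate(dataloader):
    loss, metrics = policy.forward(batch)  # phase 1: RVQ loss, phase 2: BeT loss
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    # Phase 1 metrics: n_different_codes, n_different_combinations, recon_l1_error.
    # Phase 2 metrics: the remaining entries of loss_dict from the action head.
```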
class SpatialSoftmax(nn.Module):
"""
Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al.
- (https://arxiv.org/pdf/1509.06113). A minimal port of the robomimic implementation.
+ (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation.
At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass"
of activations of each channel, i.e., keypoints in the image space for the policy to focus on.
@@ -288,14 +321,14 @@ def __init__(self, config: VQBeTConfig):
self.config = config
self.rgb_encoder = VQBeTRgbEncoder(config)
- self.num_images = len([k for k in config.input_shapes if k.startswith("observation.image")])
+ self.num_images = len(self.config.image_features)
# This action query token is used as a prompt for querying action chunks. Please refer to "A_Q" in the image above.
# Note: During the forward pass, this token is repeated as many times as needed. The authors also experimented with initializing the necessary number of tokens independently and observed inferior results.
self.action_token = nn.Parameter(torch.randn(1, 1, self.config.gpt_input_dim))
# To input state and observation features into GPT layers, we first project the features to fit the shape of input size of GPT.
self.state_projector = MLP(
- config.input_shapes["observation.state"][0], hidden_channels=[self.config.gpt_input_dim]
+ config.robot_state_feature.shape[0], hidden_channels=[self.config.gpt_input_dim]
)
self.rgb_feature_projector = MLP(
self.rgb_encoder.feature_dim, hidden_channels=[self.config.gpt_input_dim]
@@ -313,7 +346,7 @@ def __init__(self, config: VQBeTConfig):
torch.row_stack([torch.arange(i, i + self.config.action_chunk_size) for i in range(num_tokens)]),
)
- def forward(self, batch: dict[str, Tensor], rollout: bool) -> Tensor:
+ def forward(self, batch: dict[str, Tensor], rollout: bool) -> tuple[dict, dict]:
# Input validation.
assert set(batch).issuperset({"observation.state", "observation.images"})
batch_size, n_obs_steps = batch["observation.state"].shape[:2]
@@ -350,15 +383,15 @@ def forward(self, batch: dict[str, Tensor], rollout: bool) -> Tensor:
# get action features (pass through GPT)
features = self.policy(input_tokens)
- # len(self.config.input_shapes) is the number of different observation modes.
+ # len(self.config.input_features) is the number of different observation modes.
# this line gets the index of action prompt tokens.
- historical_act_pred_index = np.arange(0, n_obs_steps) * (len(self.config.input_shapes) + 1) + len(
- self.config.input_shapes
+ historical_act_pred_index = np.arange(0, n_obs_steps) * (len(self.config.input_features) + 1) + len(
+ self.config.input_features
)
# only extract the output tokens at the position of action query:
# Behavior Transformer (BeT), and VQ-BeT are both sequence-to-sequence prediction models,
- # mapping sequential observation to sequential action (please refer to section 2.2 in BeT paper https://arxiv.org/pdf/2206.11251).
+ # mapping sequential observation to sequential action (please refer to section 2.2 in BeT paper https://huggingface.co/papers/2206.11251).
# Thus, it predicts a historical action sequence, in addition to current and future actions (predicting future actions is optional).
if len_additional_action_token > 0:
features = torch.cat(
@@ -375,7 +408,7 @@ def forward(self, batch: dict[str, Tensor], rollout: bool) -> Tensor:
)
# else, it calculates the overall loss (bin prediction loss and offset loss)
else:
- output = batch["action"][:, self.select_target_actions_indices]
+ output = batch[ACTION][:, self.select_target_actions_indices]
loss = self.action_head.loss_fn(action_head_output, output, reduction="mean")
return action_head_output, loss
@@ -392,7 +425,7 @@ def __init__(self, config: VQBeTConfig):
self.map_to_cbet_preds_offset: output the predicted offsets for all the codes in all the layers.
The input dimension of ` self.map_to_cbet_preds_offset` is the same as the output of GPT,
- and the output dimension of ` self.map_to_cbet_preds_offset` is `self.vqvae_model.vqvae_num_layers (=fixed as 2) * self.config.vqvae_n_embed * config.action_chunk_size * config.output_shapes["action"][0]`.
+ and the output dimension of ` self.map_to_cbet_preds_offset` is `self.vqvae_model.vqvae_num_layers (=fixed as 2) * self.config.vqvae_n_embed * config.action_chunk_size * config.action_feature.shape[0]`.
"""
super().__init__()
@@ -419,7 +452,7 @@ def __init__(self, config: VQBeTConfig):
self.vqvae_model.vqvae_num_layers
* self.config.vqvae_n_embed
* config.action_chunk_size
- * config.output_shapes["action"][0],
+ * config.action_feature.shape[0],
],
)
# loss
@@ -453,10 +486,10 @@ def discretize(self, n_vqvae_training_steps, actions):
param.requires_grad = False
return loss, n_different_codes, n_different_combinations, recon_l1_error
- def forward(self, x, **kwargs):
+ def forward(self, x, **kwargs) -> dict:
# N is the batch size, and T is the number of action query tokens, which are processed through the same GPT
N, T, _ = x.shape
- # we calculate N and T side parallely. Thus, the dimensions would be
+ # we process the N and T dimensions in parallel. Thus, the dimensions would be
# (batch size * number of action query tokens, action chunk size, action dimension)
x = einops.rearrange(x, "N T WA -> (N T) WA")
@@ -623,84 +656,6 @@ def loss_fn(self, pred, target, **kwargs):
return loss_dict
-class VQBeTOptimizer(torch.optim.Adam):
- def __init__(self, policy, cfg):
- vqvae_params = (
- list(policy.vqbet.action_head.vqvae_model.encoder.parameters())
- + list(policy.vqbet.action_head.vqvae_model.decoder.parameters())
- + list(policy.vqbet.action_head.vqvae_model.vq_layer.parameters())
- )
- decay_params, no_decay_params = policy.vqbet.policy.configure_parameters()
- decay_params = (
- decay_params
- + list(policy.vqbet.rgb_encoder.parameters())
- + list(policy.vqbet.state_projector.parameters())
- + list(policy.vqbet.rgb_feature_projector.parameters())
- + [policy.vqbet.action_token]
- + list(policy.vqbet.action_head.map_to_cbet_preds_offset.parameters())
- )
-
- if cfg.policy.sequentially_select:
- decay_params = (
- decay_params
- + list(policy.vqbet.action_head.map_to_cbet_preds_primary_bin.parameters())
- + list(policy.vqbet.action_head.map_to_cbet_preds_secondary_bin.parameters())
- )
- else:
- decay_params = decay_params + list(policy.vqbet.action_head.map_to_cbet_preds_bin.parameters())
-
- optim_groups = [
- {
- "params": decay_params,
- "weight_decay": cfg.training.adam_weight_decay,
- "lr": cfg.training.lr,
- },
- {
- "params": vqvae_params,
- "weight_decay": 0.0001,
- "lr": cfg.training.vqvae_lr,
- },
- {
- "params": no_decay_params,
- "weight_decay": 0.0,
- "lr": cfg.training.lr,
- },
- ]
- super().__init__(
- optim_groups,
- cfg.training.lr,
- cfg.training.adam_betas,
- cfg.training.adam_eps,
- )
-
-
-class VQBeTScheduler(nn.Module):
- def __init__(self, optimizer, cfg):
- super().__init__()
- n_vqvae_training_steps = cfg.training.n_vqvae_training_steps
-
- num_warmup_steps = cfg.training.lr_warmup_steps
- num_training_steps = cfg.training.offline_steps
- num_cycles = 0.5
-
- def lr_lambda(current_step):
- if current_step < n_vqvae_training_steps:
- return float(1)
- else:
- current_step = current_step - n_vqvae_training_steps
- if current_step < num_warmup_steps:
- return float(current_step) / float(max(1, num_warmup_steps))
- progress = float(current_step - num_warmup_steps) / float(
- max(1, num_training_steps - num_warmup_steps)
- )
- return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
-
- self.lr_scheduler = LambdaLR(optimizer, lr_lambda, -1)
-
- def step(self):
- self.lr_scheduler.step()
-
-
class VQBeTRgbEncoder(nn.Module):
"""Encode an RGB image into a 1D feature vector.
@@ -743,19 +698,15 @@ def __init__(self, config: VQBeTConfig):
# Set up pooling and final layers.
# Use a dry run to get the feature map shape.
- # The dummy input should take the number of image channels from `config.input_shapes` and it should
+ # The dummy input should take the number of image channels from `config.image_features` and it should
# use the height and width from `config.crop_shape` if it is provided, otherwise it should use the
- # height and width from `config.input_shapes`.
- image_keys = [k for k in config.input_shapes if k.startswith("observation.image")]
- assert len(image_keys) == 1
- image_key = image_keys[0]
- dummy_input_h_w = (
- config.crop_shape if config.crop_shape is not None else config.input_shapes[image_key][1:]
- )
- dummy_input = torch.zeros(size=(1, config.input_shapes[image_key][0], *dummy_input_h_w))
- with torch.inference_mode():
- dummy_feature_map = self.backbone(dummy_input)
- feature_map_shape = tuple(dummy_feature_map.shape[1:])
+ # height and width from `config.image_features`.
+
+ images_shape = next(iter(config.image_features.values())).shape
+ dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:]
+ dummy_shape = (1, images_shape[0], *dummy_shape_h_w)
+ feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:]
+
self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints)
self.feature_dim = config.spatial_softmax_num_keypoints * 2
self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim)
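For context, the dry run above only serves to size SpatialSoftmax: the backbone's feature-map shape depends on the crop, and the pooled feature is `spatial_softmax_num_keypoints * 2` because each keypoint is an (x, y) coordinate. A standalone sketch of the same shape inference, assuming a torchvision ResNet-18 trunk (final avgpool/fc removed) and the default 84x84 crop:

```python
import torch
import torchvision

# ResNet-18 without its final avgpool / fc layers, similar to the encoder's trunk.
trunk = torch.nn.Sequential(*list(torchvision.models.resnet18(weights=None).children())[:-2])

with torch.inference_mode():
    feature_map = trunk(torch.zeros(1, 3, 84, 84))

print(tuple(feature_map.shape[1:]))  # e.g. (512, 3, 3) -> (C, H, W) fed to SpatialSoftmax
```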
@@ -825,7 +776,7 @@ def __init__(
Encoder and decoder are MLPs consisting of an input, output layer, and hidden layer, respectively.
The vq_layer uses residual VQs.
- This class contains functions for training the encoder and decoder along with the residual VQ layer (for trainign phase 1),
+ This class contains functions for training the encoder and decoder along with the residual VQ layer (for training phase 1),
as well as functions to help BeT training part in training phase 2.
"""
@@ -844,7 +795,7 @@ def __init__(
)
self.encoder = MLP(
- in_channels=self.config.output_shapes["action"][0] * self.config.action_chunk_size,
+ in_channels=self.config.action_feature.shape[0] * self.config.action_chunk_size,
hidden_channels=[
config.vqvae_enc_hidden_dim,
config.vqvae_enc_hidden_dim,
@@ -856,7 +807,7 @@ def __init__(
hidden_channels=[
config.vqvae_enc_hidden_dim,
config.vqvae_enc_hidden_dim,
- self.config.output_shapes["action"][0] * self.config.action_chunk_size,
+ self.config.action_feature.shape[0] * self.config.action_chunk_size,
],
)
@@ -872,13 +823,13 @@ def get_action_from_latent(self, latent):
# given latent vector, this function outputs the decoded action.
output = self.decoder(latent)
if self.config.action_chunk_size == 1:
- return einops.rearrange(output, "N (T A) -> N T A", A=self.config.output_shapes["action"][0])
+ return einops.rearrange(output, "N (T A) -> N T A", A=self.config.action_feature.shape[0])
else:
- return einops.rearrange(output, "N (T A) -> N T A", A=self.config.output_shapes["action"][0])
+ return einops.rearrange(output, "N (T A) -> N T A", A=self.config.action_feature.shape[0])
def get_code(self, state):
- # in phase 2 of VQ-BeT training, we need a `ground truth labels of action data` to calculate the Focal loss for code prediction head. (please refer to section 3.3 in the paper https://arxiv.org/pdf/2403.03181)
- # this function outputs the `GT code` of given action using frozen encoder and quantization layers. (please refer to Figure 2. in the paper https://arxiv.org/pdf/2403.03181)
+ # in phase 2 of VQ-BeT training, we need a `ground truth labels of action data` to calculate the Focal loss for code prediction head. (please refer to section 3.3 in the paper https://huggingface.co/papers/2403.03181)
+ # this function outputs the `GT code` of given action using frozen encoder and quantization layers. (please refer to Figure 2. in the paper https://huggingface.co/papers/2403.03181)
state = einops.rearrange(state, "N T A -> N (T A)")
with torch.no_grad():
state_rep = self.encoder(state)
@@ -891,7 +842,7 @@ def get_code(self, state):
return state_vq, vq_code
def vqvae_forward(self, state):
- # This function passes the given data through Residual VQ with Encoder and Decoder. Please refer to section 3.2 in the paper https://arxiv.org/pdf/2403.03181).
+ # This function passes the given data through Residual VQ with Encoder and Decoder. Please refer to section 3.2 in the paper https://huggingface.co/papers/2403.03181).
state = einops.rearrange(state, "N T A -> N (T A)")
# We start with passing action (or action chunk) at:t+n through the encoder ϕ.
state_rep = self.encoder(state)
@@ -950,7 +901,7 @@ class MLP(torch.nn.Sequential):
def __init__(
self,
in_channels: int,
- hidden_channels: List[int],
+ hidden_channels: list[int],
):
layers = []
in_dim = in_channels
diff --git a/lerobot/common/policies/vqbet/vqbet_utils.py b/src/lerobot/policies/vqbet/vqbet_utils.py
similarity index 96%
rename from lerobot/common/policies/vqbet/vqbet_utils.py
rename to src/lerobot/policies/vqbet/vqbet_utils.py
index 90a2cfda37..e0afe55852 100644
--- a/lerobot/common/policies/vqbet/vqbet_utils.py
+++ b/src/lerobot/policies/vqbet/vqbet_utils.py
@@ -17,10 +17,10 @@
# limitations under the License.
import math
+from collections.abc import Callable
from functools import partial
from math import ceil
from random import randrange
-from typing import Callable
import torch
import torch.distributed as distributed
@@ -30,7 +30,7 @@
from torch.cuda.amp import autocast
from torch.optim import Optimizer
-from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig
+from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
# ruff: noqa: N806
@@ -38,7 +38,7 @@
This file is part of a VQ-BeT that utilizes code from the following repositories:
- Vector Quantize PyTorch code is licensed under the MIT License:
- Origianl source: https://github.com/lucidrains/vector-quantize-pytorch
+ Original source: https://github.com/lucidrains/vector-quantize-pytorch
- nanoGPT part is an adaptation of Andrej Karpathy's nanoGPT implementation in PyTorch.
Original source: https://github.com/karpathy/nanoGPT
@@ -198,14 +198,14 @@ def __init__(self, config: VQBeTConfig):
# report number of parameters
n_params = sum(p.numel() for p in self.parameters())
- print("number of parameters: {:.2f}M".format(n_params / 1e6))
+ print(f"number of parameters: {n_params / 1e6:.2f}M")
def forward(self, input, targets=None):
device = input.device
b, t, d = input.size()
- assert (
- t <= self.config.gpt_block_size
- ), f"Cannot forward sequence of length {t}, block size is only {self.config.gpt_block_size}"
+ assert t <= self.config.gpt_block_size, (
+ f"Cannot forward sequence of length {t}, block size is only {self.config.gpt_block_size}"
+ )
# positional encodings that are added to the input embeddings
pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t)
@@ -255,7 +255,7 @@ def configure_parameters(self):
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, _p in m.named_parameters():
- fpn = "{}.{}".format(mn, pn) if mn else pn # full param name
+ fpn = f"{mn}.{pn}" if mn else pn # full param name
if pn.endswith("bias"):
# all biases will not be decayed
no_decay.add(fpn)
@@ -273,10 +273,10 @@ def configure_parameters(self):
assert len(inter_params) == 0, "parameters {} made it into both decay/no_decay sets!".format(
str(inter_params)
)
- assert (
- len(param_dict.keys() - union_params) == 0
- ), "parameters {} were not separated into either decay/no_decay set!".format(
- str(param_dict.keys() - union_params),
+ assert len(param_dict.keys() - union_params) == 0, (
+ "parameters {} were not separated into either decay/no_decay set!".format(
+ str(param_dict.keys() - union_params),
+ )
)
decay = [param_dict[pn] for pn in sorted(decay)]
@@ -289,7 +289,7 @@ def configure_parameters(self):
This file is a part for Residual Vector Quantization that utilizes code from the following repository:
- Phil Wang's vector-quantize-pytorch implementation in PyTorch.
- Origianl source: https://github.com/lucidrains/vector-quantize-pytorch
+ Original source: https://github.com/lucidrains/vector-quantize-pytorch
- The vector-quantize-pytorch code is licensed under the MIT License:
@@ -336,7 +336,7 @@ class ResidualVQ(nn.Module):
"""
Residual VQ is composed of multiple VectorQuantize layers.
- Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf
+ Follows Algorithm 1. in https://huggingface.co/papers/2107.03312
"Residual Vector Quantizer (a.k.a. multi-stage vector quantizer [36]) cascades Nq layers of VQ as follows. The unquantized input vector is
passed through a first VQ and quantization residuals are computed. The residuals are then iteratively quantized by a sequence of additional
Nq -1 vector quantizers, as described in Algorithm 1."
@@ -419,9 +419,9 @@ def get_codebook_vector_from_indices(self, indices):
# and the network should be able to reconstruct
if quantize_dim < self.num_quantizers:
- assert (
- self.quantize_dropout > 0.0
- ), "quantize dropout must be greater than 0 if you wish to reconstruct from a signal with less fine quantizations"
+ assert self.quantize_dropout > 0.0, (
+ "quantize dropout must be greater than 0 if you wish to reconstruct from a signal with less fine quantizations"
+ )
indices = F.pad(indices, (0, self.num_quantizers - quantize_dim), value=-1)
# get ready for gathering
@@ -472,9 +472,9 @@ def forward(self, x, indices=None, return_all_codes=False, sample_codebook_temp=
all_indices = []
if return_loss:
- assert not torch.any(
- indices == -1
- ), "some of the residual vq indices were dropped out. please use indices derived when the module is in eval mode to derive cross entropy loss"
+ assert not torch.any(indices == -1), (
+ "some of the residual vq indices were dropped out. please use indices derived when the module is in eval mode to derive cross entropy loss"
+ )
ce_losses = []
should_quantize_dropout = self.training and self.quantize_dropout and not return_loss
@@ -887,9 +887,9 @@ def calculate_ce_loss(codes):
# only calculate orthogonal loss for the activated codes for this batch
if self.orthogonal_reg_active_codes_only:
- assert not (
- is_multiheaded and self.separate_codebook_per_head
- ), "orthogonal regularization for only active codes not compatible with multi-headed with separate codebooks yet"
+ assert not (is_multiheaded and self.separate_codebook_per_head), (
+ "orthogonal regularization for only active codes not compatible with multi-headed with separate codebooks yet"
+ )
unique_code_ids = torch.unique(embed_ind)
codebook = codebook[:, unique_code_ids]
@@ -999,14 +999,14 @@ def gumbel_sample(
ind = sampling_logits.argmax(dim=dim)
one_hot = F.one_hot(ind, size).type(dtype)
- assert not (
- reinmax and not straight_through
- ), "reinmax can only be turned on if using straight through gumbel softmax"
+ assert not (reinmax and not straight_through), (
+ "reinmax can only be turned on if using straight through gumbel softmax"
+ )
if not straight_through or temperature <= 0.0 or not training:
return ind, one_hot
- # use reinmax for better second-order accuracy - https://arxiv.org/abs/2304.08612
+ # use reinmax for better second-order accuracy - https://huggingface.co/papers/2304.08612
# algorithm 2
if reinmax:
@@ -1156,7 +1156,7 @@ def batched_embedding(indices, embeds):
def orthogonal_loss_fn(t):
- # eq (2) from https://arxiv.org/abs/2112.00384
+ # eq (2) from https://huggingface.co/papers/2112.00384
h, n = t.shape[:2]
normed_codes = F.normalize(t, p=2, dim=-1)
cosine_sim = einsum("h i d, h j d -> h i j", normed_codes, normed_codes)
@@ -1209,9 +1209,9 @@ def __init__(
self.gumbel_sample = gumbel_sample
self.sample_codebook_temp = sample_codebook_temp
- assert not (
- use_ddp and num_codebooks > 1 and kmeans_init
- ), "kmeans init is not compatible with multiple codebooks in distributed environment for now"
+ assert not (use_ddp and num_codebooks > 1 and kmeans_init), (
+ "kmeans init is not compatible with multiple codebooks in distributed environment for now"
+ )
self.sample_fn = sample_vectors_distributed if use_ddp and sync_kmeans else batched_sample_vectors
self.kmeans_all_reduce_fn = distributed.all_reduce if use_ddp and sync_kmeans else noop
@@ -1349,9 +1349,9 @@ def update_affine(self, data, embed, mask=None):
# calculate distributed variance
- variance_numer = reduce((data - batch_mean) ** 2, "h n d -> h 1 d", "sum")
- distributed.all_reduce(variance_numer)
- batch_variance = variance_numer / num_vectors
+ variance_number = reduce((data - batch_mean) ** 2, "h n d -> h 1 d", "sum")
+ distributed.all_reduce(variance_number)
+ batch_variance = variance_number / num_vectors
self.update_with_decay("batch_variance", batch_variance, self.affine_param_batch_decay)
diff --git a/src/lerobot/record.py b/src/lerobot/record.py
new file mode 100644
index 0000000000..d662efcab3
--- /dev/null
+++ b/src/lerobot/record.py
@@ -0,0 +1,397 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Records a dataset. Actions for the robot can be generated either by teleoperation or by a policy.
+
+Example:
+
+```shell
+python -m lerobot.record \
+ --robot.type=so100_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.cameras="{laptop: {type: opencv, camera_index: 0, width: 640, height: 480}}" \
+ --robot.id=black \
+ --dataset.repo_id=aliberts/record-test \
+ --dataset.num_episodes=2 \
+ --dataset.single_task="Grab the cube" \
+ # <- Teleop optional: use it to record via teleoperation, or in between episodes when recording with a policy \
+ # --teleop.type=so100_leader \
+ # --teleop.port=/dev/tty.usbmodem58760431551 \
+ # --teleop.id=blue \
+ # <- Policy optional if you want to record with a policy \
+ # --policy.path=${HF_USER}/my_policy \
+```
+
+Example recording with bimanual so100:
+```shell
+python -m lerobot.record \
+ --robot.type=bi_so100_follower \
+ --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \
+ --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \
+ --robot.id=bimanual_follower \
+ --robot.cameras='{
+ left: {"type": "opencv", "index_or_path": 0, "width": 640, "height": 480, "fps": 30},
+ top: {"type": "opencv", "index_or_path": 1, "width": 640, "height": 480, "fps": 30},
+ right: {"type": "opencv", "index_or_path": 2, "width": 640, "height": 480, "fps": 30}
+ }' \
+ --teleop.type=bi_so100_leader \
+ --teleop.left_arm_port=/dev/tty.usbmodem5A460828611 \
+ --teleop.right_arm_port=/dev/tty.usbmodem5A460826981 \
+ --teleop.id=bimanual_leader \
+ --display_data=true \
+ --dataset.repo_id=${HF_USER}/bimanual-so100-handover-cube \
+ --dataset.num_episodes=25 \
+ --dataset.single_task="Grab and handover the red cube to the other arm"
+```
+"""
+
+import logging
+import time
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from pprint import pformat
+
+from lerobot.cameras import ( # noqa: F401
+ CameraConfig, # noqa: F401
+)
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401
+from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401
+from lerobot.configs import parser
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.datasets.image_writer import safe_stop_image_writer
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features
+from lerobot.datasets.video_utils import VideoEncodingManager
+from lerobot.policies.factory import make_policy
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.robots import ( # noqa: F401
+ Robot,
+ RobotConfig,
+ bi_so100_follower,
+ hope_jr,
+ koch_follower,
+ make_robot_from_config,
+ so100_follower,
+ so101_follower,
+)
+from lerobot.teleoperators import ( # noqa: F401
+ Teleoperator,
+ TeleoperatorConfig,
+ bi_so100_leader,
+ homunculus,
+ koch_leader,
+ make_teleoperator_from_config,
+ so100_leader,
+ so101_leader,
+)
+from lerobot.teleoperators.keyboard.teleop_keyboard import KeyboardTeleop
+from lerobot.utils.control_utils import (
+ init_keyboard_listener,
+ is_headless,
+ predict_action,
+ sanity_check_dataset_name,
+ sanity_check_dataset_robot_compatibility,
+)
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.utils import (
+ get_safe_torch_device,
+ init_logging,
+ log_say,
+)
+from lerobot.utils.visualization_utils import _init_rerun, log_rerun_data
+
+
+@dataclass
+class DatasetRecordConfig:
+ # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).
+ repo_id: str
+ # A short but accurate description of the task performed during the recording (e.g. "Pick the Lego block and drop it in the box on the right.")
+ single_task: str
+ # Root directory where the dataset will be stored (e.g. 'dataset/path').
+ root: str | Path | None = None
+ # Limit the frames per second.
+ fps: int = 30
+ # Number of seconds for data recording for each episode.
+ episode_time_s: int | float = 60
+ # Number of seconds for resetting the environment after each episode.
+ reset_time_s: int | float = 60
+ # Number of episodes to record.
+ num_episodes: int = 50
+ # Encode frames in the dataset into video
+ video: bool = True
+ # Upload dataset to Hugging Face hub.
+ push_to_hub: bool = True
+ # Upload on private repository on the Hugging Face hub.
+ private: bool = False
+ # Add tags to your dataset on the hub.
+ tags: list[str] | None = None
+ # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only;
+ # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes
+ # and threads depends on your system. We recommend 4 threads per camera with 0 processes.
+ # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses.
+ num_image_writer_processes: int = 0
+ # Number of threads writing the frames as png images on disk, per camera.
+ # Too many threads might cause unstable teleoperation fps due to main thread being blocked.
+ # Not enough threads might cause low camera fps.
+ num_image_writer_threads_per_camera: int = 4
+ # Number of episodes to record before batch encoding videos
+ # Set to 1 for immediate encoding (default behavior), or higher for batched encoding
+ video_encoding_batch_size: int = 1
+
+ def __post_init__(self):
+ if self.single_task is None:
+ raise ValueError("You need to provide a task as argument in `single_task`.")
+
+
+@dataclass
+class RecordConfig:
+ robot: RobotConfig
+ dataset: DatasetRecordConfig
+ # Teleoperator config used to control the robot (optional)
+ teleop: TeleoperatorConfig | None = None
+ # Policy config used to control the robot (optional)
+ policy: PreTrainedConfig | None = None
+ # Display all cameras on screen
+ display_data: bool = False
+ # Use vocal synthesis to read events.
+ play_sounds: bool = True
+ # Resume recording on an existing dataset.
+ resume: bool = False
+
+ def __post_init__(self):
+ # HACK: We parse again the cli args here to get the pretrained path if there was one.
+ policy_path = parser.get_path_arg("policy")
+ if policy_path:
+ cli_overrides = parser.get_cli_overrides("policy")
+ self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
+ self.policy.pretrained_path = policy_path
+
+ if self.teleop is None and self.policy is None:
+ raise ValueError("Choose a policy, a teleoperator or both to control the robot")
+
+ @classmethod
+ def __get_path_fields__(cls) -> list[str]:
+ """This enables the parser to load config from the policy using `--policy.path=local/dir`"""
+ return ["policy"]
+
+
+@safe_stop_image_writer
+def record_loop(
+ robot: Robot,
+ events: dict,
+ fps: int,
+ dataset: LeRobotDataset | None = None,
+ teleop: Teleoperator | list[Teleoperator] | None = None,
+ policy: PreTrainedPolicy | None = None,
+ control_time_s: int | None = None,
+ single_task: str | None = None,
+ display_data: bool = False,
+):
+ if dataset is not None and dataset.fps != fps:
+ raise ValueError(f"The dataset fps should be equal to requested fps ({dataset.fps} != {fps}).")
+
+ teleop_arm = teleop_keyboard = None
+ if isinstance(teleop, list):
+ teleop_keyboard = next((t for t in teleop if isinstance(t, KeyboardTeleop)), None)
+ teleop_arm = next(
+ (
+ t
+ for t in teleop
+ if isinstance(t, (so100_leader.SO100Leader, so101_leader.SO101Leader, koch_leader.KochLeader))
+ ),
+ None,
+ )
+
+ if not (teleop_arm and teleop_keyboard and len(teleop) == 2 and robot.name == "lekiwi_client"):
+ raise ValueError(
+ "For multi-teleop, the list must contain exactly one KeyboardTeleop and one arm teleoperator. Currently only supported for LeKiwi robot."
+ )
+
+ # if a policy is given, its internal state needs resetting
+ if policy is not None:
+ policy.reset()
+
+ timestamp = 0
+ start_episode_t = time.perf_counter()
+ while timestamp < control_time_s:
+ start_loop_t = time.perf_counter()
+
+ if events["exit_early"]:
+ events["exit_early"] = False
+ break
+
+ observation = robot.get_observation()
+
+ if policy is not None or dataset is not None:
+ observation_frame = build_dataset_frame(dataset.features, observation, prefix="observation")
+
+ if policy is not None:
+ action_values = predict_action(
+ observation_frame,
+ policy,
+ get_safe_torch_device(policy.config.device),
+ policy.config.use_amp,
+ task=single_task,
+ robot_type=robot.robot_type,
+ )
+ action = {key: action_values[i].item() for i, key in enumerate(robot.action_features)}
+ elif policy is None and isinstance(teleop, Teleoperator):
+ action = teleop.get_action()
+ elif policy is None and isinstance(teleop, list):
+ # TODO(pepijn, steven): clean the record loop for use of multiple robots (possibly with pipeline)
+ arm_action = teleop_arm.get_action()
+ arm_action = {f"arm_{k}": v for k, v in arm_action.items()}
+
+ keyboard_action = teleop_keyboard.get_action()
+ base_action = robot._from_keyboard_to_base_action(keyboard_action)
+
+ action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
+ else:
+ logging.info(
+ "No policy or teleoperator provided, skipping action generation."
+ "This is likely to happen when resetting the environment without a teleop device."
+ "The robot won't be at its rest position at the start of the next episode."
+ )
+ continue
+
+ # The action may be clipped using `max_relative_target`,
+ # so the action actually sent is what gets saved in the dataset.
+ sent_action = robot.send_action(action)
+
+ if dataset is not None:
+ action_frame = build_dataset_frame(dataset.features, sent_action, prefix="action")
+ frame = {**observation_frame, **action_frame}
+ dataset.add_frame(frame, task=single_task)
+
+ if display_data:
+ log_rerun_data(observation, action)
+
+ dt_s = time.perf_counter() - start_loop_t
+ busy_wait(1 / fps - dt_s)
+
+ timestamp = time.perf_counter() - start_episode_t
+
+
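The tail of each iteration above is a fixed-rate pacing pattern: measure how long the loop body took, then wait out the remainder of the 1/fps budget. The same idea in isolation (a sketch using `time.sleep` in place of `busy_wait`):

```python
import time

fps = 30
for _ in range(3):
    start_loop_t = time.perf_counter()

    # ... read observation, compute and send action ...

    dt_s = time.perf_counter() - start_loop_t
    time.sleep(max(0.0, 1 / fps - dt_s))  # hold the loop at roughly `fps` iterations per second
```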
+@parser.wrap()
+def record(cfg: RecordConfig) -> LeRobotDataset:
+ init_logging()
+ logging.info(pformat(asdict(cfg)))
+ if cfg.display_data:
+ _init_rerun(session_name="recording")
+
+ robot = make_robot_from_config(cfg.robot)
+ teleop = make_teleoperator_from_config(cfg.teleop) if cfg.teleop is not None else None
+
+ action_features = hw_to_dataset_features(robot.action_features, "action", cfg.dataset.video)
+ obs_features = hw_to_dataset_features(robot.observation_features, "observation", cfg.dataset.video)
+ dataset_features = {**action_features, **obs_features}
+
+ if cfg.resume:
+ dataset = LeRobotDataset(
+ cfg.dataset.repo_id,
+ root=cfg.dataset.root,
+ batch_encoding_size=cfg.dataset.video_encoding_batch_size,
+ )
+
+ if hasattr(robot, "cameras") and len(robot.cameras) > 0:
+ dataset.start_image_writer(
+ num_processes=cfg.dataset.num_image_writer_processes,
+ num_threads=cfg.dataset.num_image_writer_threads_per_camera * len(robot.cameras),
+ )
+ sanity_check_dataset_robot_compatibility(dataset, robot, cfg.dataset.fps, dataset_features)
+ else:
+ # Create empty dataset or load existing saved episodes
+ sanity_check_dataset_name(cfg.dataset.repo_id, cfg.policy)
+ dataset = LeRobotDataset.create(
+ cfg.dataset.repo_id,
+ cfg.dataset.fps,
+ root=cfg.dataset.root,
+ robot_type=robot.name,
+ features=dataset_features,
+ use_videos=cfg.dataset.video,
+ image_writer_processes=cfg.dataset.num_image_writer_processes,
+ image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera * len(robot.cameras),
+ batch_encoding_size=cfg.dataset.video_encoding_batch_size,
+ )
+
+ # Load pretrained policy
+ policy = None if cfg.policy is None else make_policy(cfg.policy, ds_meta=dataset.meta)
+
+ robot.connect()
+ if teleop is not None:
+ teleop.connect()
+
+ listener, events = init_keyboard_listener()
+
+ with VideoEncodingManager(dataset):
+ recorded_episodes = 0
+ while recorded_episodes < cfg.dataset.num_episodes and not events["stop_recording"]:
+ log_say(f"Recording episode {dataset.num_episodes}", cfg.play_sounds)
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=cfg.dataset.fps,
+ teleop=teleop,
+ policy=policy,
+ dataset=dataset,
+ control_time_s=cfg.dataset.episode_time_s,
+ single_task=cfg.dataset.single_task,
+ display_data=cfg.display_data,
+ )
+
+ # Execute a few seconds without recording to give time to manually reset the environment
+ # Skip reset for the last episode to be recorded
+ if not events["stop_recording"] and (
+ (recorded_episodes < cfg.dataset.num_episodes - 1) or events["rerecord_episode"]
+ ):
+ log_say("Reset the environment", cfg.play_sounds)
+ record_loop(
+ robot=robot,
+ events=events,
+ fps=cfg.dataset.fps,
+ teleop=teleop,
+ control_time_s=cfg.dataset.reset_time_s,
+ single_task=cfg.dataset.single_task,
+ display_data=cfg.display_data,
+ )
+
+ if events["rerecord_episode"]:
+ log_say("Re-record episode", cfg.play_sounds)
+ events["rerecord_episode"] = False
+ events["exit_early"] = False
+ dataset.clear_episode_buffer()
+ continue
+
+ dataset.save_episode()
+ recorded_episodes += 1
+
+ log_say("Stop recording", cfg.play_sounds, blocking=True)
+
+ robot.disconnect()
+ if teleop is not None:
+ teleop.disconnect()
+
+ if not is_headless() and listener is not None:
+ listener.stop()
+
+ if cfg.dataset.push_to_hub:
+ dataset.push_to_hub(tags=cfg.dataset.tags, private=cfg.dataset.private)
+
+ log_say("Exiting", cfg.play_sounds)
+ return dataset
+
+
+if __name__ == "__main__":
+ record()
diff --git a/src/lerobot/replay.py b/src/lerobot/replay.py
new file mode 100644
index 0000000000..afe54d90f1
--- /dev/null
+++ b/src/lerobot/replay.py
@@ -0,0 +1,116 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Replays the actions of an episode from a dataset on a robot.
+
+Examples:
+
+```shell
+python -m lerobot.replay \
+ --robot.type=so100_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.id=black \
+ --dataset.repo_id=aliberts/record-test \
+ --dataset.episode=2
+```
+
+Example replay with bimanual so100:
+```shell
+python -m lerobot.replay \
+ --robot.type=bi_so100_follower \
+ --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \
+ --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \
+ --robot.id=bimanual_follower \
+ --dataset.repo_id=${HF_USER}/bimanual-so100-handover-cube \
+ --dataset.episode=0
+```
+
+"""
+
+import logging
+import time
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from pprint import pformat
+
+import draccus
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.robots import ( # noqa: F401
+ Robot,
+ RobotConfig,
+ bi_so100_follower,
+ hope_jr,
+ koch_follower,
+ make_robot_from_config,
+ so100_follower,
+ so101_follower,
+)
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.utils import (
+ init_logging,
+ log_say,
+)
+
+
+@dataclass
+class DatasetReplayConfig:
+ # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).
+ repo_id: str
+ # Episode to replay.
+ episode: int
+ # Root directory where the dataset will be stored (e.g. 'dataset/path').
+ root: str | Path | None = None
+ # Limit the frames per second.
+ fps: int = 30
+
+
+@dataclass
+class ReplayConfig:
+ robot: RobotConfig
+ dataset: DatasetReplayConfig
+ # Use vocal synthesis to read events.
+ play_sounds: bool = True
+
+
+@draccus.wrap()
+def replay(cfg: ReplayConfig):
+ init_logging()
+ logging.info(pformat(asdict(cfg)))
+
+ robot = make_robot_from_config(cfg.robot)
+ dataset = LeRobotDataset(cfg.dataset.repo_id, root=cfg.dataset.root, episodes=[cfg.dataset.episode])
+ actions = dataset.hf_dataset.select_columns("action")
+ robot.connect()
+
+ log_say("Replaying episode", cfg.play_sounds, blocking=True)
+ for idx in range(dataset.num_frames):
+ start_episode_t = time.perf_counter()
+
+ action_array = actions[idx]["action"]
+ action = {}
+ for i, name in enumerate(dataset.features["action"]["names"]):
+ action[name] = action_array[i]
+
+ robot.send_action(action)
+
+ dt_s = time.perf_counter() - start_episode_t
+ busy_wait(1 / dataset.fps - dt_s)
+
+ robot.disconnect()
+
+
+if __name__ == "__main__":
+ replay()
diff --git a/src/lerobot/robots/__init__.py b/src/lerobot/robots/__init__.py
new file mode 100644
index 0000000000..d8fd0de931
--- /dev/null
+++ b/src/lerobot/robots/__init__.py
@@ -0,0 +1,3 @@
+from .config import RobotConfig
+from .robot import Robot
+from .utils import make_robot_from_config
diff --git a/src/lerobot/robots/bi_so100_follower/__init__.py b/src/lerobot/robots/bi_so100_follower/__init__.py
new file mode 100644
index 0000000000..90f56516b6
--- /dev/null
+++ b/src/lerobot/robots/bi_so100_follower/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .bi_so100_follower import BiSO100Follower
+from .config_bi_so100_follower import BiSO100FollowerConfig
diff --git a/src/lerobot/robots/bi_so100_follower/bi_so100_follower.py b/src/lerobot/robots/bi_so100_follower/bi_so100_follower.py
new file mode 100644
index 0000000000..7992b79fd4
--- /dev/null
+++ b/src/lerobot/robots/bi_so100_follower/bi_so100_follower.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from typing import Any
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.robots.so100_follower import SO100Follower
+from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
+
+from ..robot import Robot
+from .config_bi_so100_follower import BiSO100FollowerConfig
+
+logger = logging.getLogger(__name__)
+
+
+class BiSO100Follower(Robot):
+ """
+ [Bimanual SO-100 Follower Arms](https://github.com/TheRobotStudio/SO-ARM100) designed by TheRobotStudio
+ This bimanual robot can also be easily adapted to use SO-101 follower arms: just replace the SO100Follower class with SO101Follower and SO100FollowerConfig with SO101FollowerConfig.
+ """
+
+ config_class = BiSO100FollowerConfig
+ name = "bi_so100_follower"
+
+ def __init__(self, config: BiSO100FollowerConfig):
+ super().__init__(config)
+ self.config = config
+
+ left_arm_config = SO100FollowerConfig(
+ id=f"{config.id}_left" if config.id else None,
+ calibration_dir=config.calibration_dir,
+ port=config.left_arm_port,
+ disable_torque_on_disconnect=config.left_arm_disable_torque_on_disconnect,
+ max_relative_target=config.left_arm_max_relative_target,
+ use_degrees=config.left_arm_use_degrees,
+ cameras={},
+ )
+
+ right_arm_config = SO100FollowerConfig(
+ id=f"{config.id}_right" if config.id else None,
+ calibration_dir=config.calibration_dir,
+ port=config.right_arm_port,
+ disable_torque_on_disconnect=config.right_arm_disable_torque_on_disconnect,
+ max_relative_target=config.right_arm_max_relative_target,
+ use_degrees=config.right_arm_use_degrees,
+ cameras={},
+ )
+
+ self.left_arm = SO100Follower(left_arm_config)
+ self.right_arm = SO100Follower(right_arm_config)
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ @property
+ def _motors_ft(self) -> dict[str, type]:
+ return {f"left_{motor}.pos": float for motor in self.left_arm.bus.motors} | {
+ f"right_{motor}.pos": float for motor in self.right_arm.bus.motors
+ }
+
+ @property
+ def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._motors_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._motors_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return (
+ self.left_arm.bus.is_connected
+ and self.right_arm.bus.is_connected
+ and all(cam.is_connected for cam in self.cameras.values())
+ )
+
+ def connect(self, calibrate: bool = True) -> None:
+ self.left_arm.connect(calibrate)
+ self.right_arm.connect(calibrate)
+
+ for cam in self.cameras.values():
+ cam.connect()
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.left_arm.is_calibrated and self.right_arm.is_calibrated
+
+ def calibrate(self) -> None:
+ self.left_arm.calibrate()
+ self.right_arm.calibrate()
+
+ def configure(self) -> None:
+ self.left_arm.configure()
+ self.right_arm.configure()
+
+ def setup_motors(self) -> None:
+ self.left_arm.setup_motors()
+ self.right_arm.setup_motors()
+
+ def get_observation(self) -> dict[str, Any]:
+ obs_dict = {}
+
+ # Add "left_" prefix
+ left_obs = self.left_arm.get_observation()
+ obs_dict.update({f"left_{key}": value for key, value in left_obs.items()})
+
+ # Add "right_" prefix
+ right_obs = self.right_arm.get_observation()
+ obs_dict.update({f"right_{key}": value for key, value in right_obs.items()})
+
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ # Remove "left_" prefix
+ left_action = {
+ key.removeprefix("left_"): value for key, value in action.items() if key.startswith("left_")
+ }
+ # Remove "right_" prefix
+ right_action = {
+ key.removeprefix("right_"): value for key, value in action.items() if key.startswith("right_")
+ }
+
+ send_action_left = self.left_arm.send_action(left_action)
+ send_action_right = self.right_arm.send_action(right_action)
+
+ # Add prefixes back
+ prefixed_send_action_left = {f"left_{key}": value for key, value in send_action_left.items()}
+ prefixed_send_action_right = {f"right_{key}": value for key, value in send_action_right.items()}
+
+ return {**prefixed_send_action_left, **prefixed_send_action_right}
+
+ def disconnect(self):
+ self.left_arm.disconnect()
+ self.right_arm.disconnect()
+
+ for cam in self.cameras.values():
+ cam.disconnect()
diff --git a/src/lerobot/robots/bi_so100_follower/config_bi_so100_follower.py b/src/lerobot/robots/bi_so100_follower/config_bi_so100_follower.py
new file mode 100644
index 0000000000..00643b85f9
--- /dev/null
+++ b/src/lerobot/robots/bi_so100_follower/config_bi_so100_follower.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+
+from ..config import RobotConfig
+
+
+@RobotConfig.register_subclass("bi_so100_follower")
+@dataclass
+class BiSO100FollowerConfig(RobotConfig):
+ left_arm_port: str
+ right_arm_port: str
+
+ # Optional
+ left_arm_disable_torque_on_disconnect: bool = True
+ left_arm_max_relative_target: int | None = None
+ left_arm_use_degrees: bool = False
+ right_arm_disable_torque_on_disconnect: bool = True
+ right_arm_max_relative_target: int | None = None
+ right_arm_use_degrees: bool = False
+
+ # cameras (shared between both arms)
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
diff --git a/src/lerobot/robots/config.py b/src/lerobot/robots/config.py
new file mode 100644
index 0000000000..a85a831693
--- /dev/null
+++ b/src/lerobot/robots/config.py
@@ -0,0 +1,40 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+from dataclasses import dataclass
+from pathlib import Path
+
+import draccus
+
+
+@dataclass(kw_only=True)
+class RobotConfig(draccus.ChoiceRegistry, abc.ABC):
+ # Allows distinguishing between different robots of the same type
+ id: str | None = None
+ # Directory to store calibration file
+ calibration_dir: Path | None = None
+
+ def __post_init__(self):
+ if hasattr(self, "cameras") and self.cameras:
+ for _, config in self.cameras.items():
+ for attr in ["width", "height", "fps"]:
+ if getattr(config, attr) is None:
+ raise ValueError(
+ f"Specifying '{attr}' is required for the camera to be used in a robot"
+ )
+
+ @property
+ def type(self) -> str:
+ return self.get_choice_name(self.__class__)
diff --git a/src/lerobot/robots/hope_jr/__init__.py b/src/lerobot/robots/hope_jr/__init__.py
new file mode 100644
index 0000000000..26603ebb08
--- /dev/null
+++ b/src/lerobot/robots/hope_jr/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_hope_jr import HopeJrArmConfig, HopeJrHandConfig
+from .hope_jr_arm import HopeJrArm
+from .hope_jr_hand import HopeJrHand
diff --git a/src/lerobot/robots/hope_jr/config_hope_jr.py b/src/lerobot/robots/hope_jr/config_hope_jr.py
new file mode 100644
index 0000000000..747e98e01a
--- /dev/null
+++ b/src/lerobot/robots/hope_jr/config_hope_jr.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+
+from ..config import RobotConfig
+
+
+@RobotConfig.register_subclass("hope_jr_hand")
+@dataclass
+class HopeJrHandConfig(RobotConfig):
+ port: str # Port to connect to the hand
+ side: str # "left" / "right"
+
+ disable_torque_on_disconnect: bool = True
+
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
+
+ def __post_init__(self):
+ super().__post_init__()
+ if self.side not in ["right", "left"]:
+ raise ValueError(f"`side` must be 'left' or 'right', got {self.side!r}")
+
+
+@RobotConfig.register_subclass("hope_jr_arm")
+@dataclass
+class HopeJrArmConfig(RobotConfig):
+ port: str # Port to connect to the arm
+ disable_torque_on_disconnect: bool = True
+
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+ # the number of motors in your follower arms.
+ max_relative_target: int | None = None
+
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
diff --git a/src/lerobot/robots/hope_jr/hope_jr.mdx b/src/lerobot/robots/hope_jr/hope_jr.mdx
new file mode 100644
index 0000000000..72aa8f9239
--- /dev/null
+++ b/src/lerobot/robots/hope_jr/hope_jr.mdx
@@ -0,0 +1,277 @@
+# HopeJR
+
+## Prerequisites
+
+- [Hardware Setup](https://github.com/TheRobotStudio/HOPEJr)
+
+## Install LeRobot
+
+Follow the [installation instructions](https://github.com/huggingface/lerobot#installation) to install LeRobot.
+
+Install LeRobot with HopeJR dependencies:
+
+```bash
+pip install -e ".[hopejr]"
+```
+
+## Device Configuration
+
+Before starting calibration and operation, you need to identify the USB ports for each HopeJR component. Run this script to find the USB ports for the arm, hand, glove, and exoskeleton:
+
+```bash
+python -m lerobot.find_port
+```
+
+This will display the available USB ports and their associated devices. Make note of the port paths (e.g., `/dev/tty.usbmodem58760433331`, `/dev/tty.usbmodem11301`) as you'll need to specify them in the `--robot.port` and `--teleop.port` parameters when recording data, replaying episodes, or running teleoperation scripts.
+
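+For reference, these are the same ports you will pass to the robot config objects if you use the Python API directly. A minimal sketch (the ports below are the example values above; replace them with the ones `find_port` reported on your machine):
+
+```python
+from lerobot.robots.hope_jr import HopeJrArmConfig, HopeJrHandConfig
+
+# Example ports, taken from the paragraph above; adjust to your own setup.
+arm_config = HopeJrArmConfig(port="/dev/tty.usbmodem58760433331", id="white")
+hand_config = HopeJrHandConfig(port="/dev/tty.usbmodem11301", side="right", id="blue")
+```
+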
+## Step 1: Calibration
+
+Before performing teleoperation, HopeJR's limbs need to be calibrated. Calibration files will be saved in `~/.cache/huggingface/lerobot/calibration`.
+
+### 1.1 Calibrate Robot Hand
+
+```bash
+python -m lerobot.calibrate \
+ --robot.type=hope_jr_hand \
+ --robot.port=/dev/tty.usbmodem58760432281 \
+ --robot.id=blue \
+ --robot.side=right
+```
+
+When running the calibration script, a calibration GUI will pop up. Finger joints are named as follows:
+
+**Thumb**:
+
+- **CMC**: base joint connecting thumb to hand
+- **MCP**: knuckle joint
+- **PIP**: first finger joint
+- **DIP**: fingertip joint
+
+**Index, Middle, Ring, and Pinky fingers**:
+
+- **Radial flexor**: Moves base of finger towards the thumb
+- **Ulnar flexor**: Moves base of finger towards the pinky
+- **PIP/DIP**: Flexes the distal and proximal phalanx of the finger
+
+Each one of these will need to be calibrated individually via the GUI.
+Note that ulnar and radial flexors should have ranges of the same size (but with different offsets) in order to get symmetric movement.
+
+
+
+
+
+Use the calibration interface to set the range boundaries for each joint as shown above.
+
+
+
+
+
+Once you have set the appropriate boundaries for all joints, click "Save" to save the calibration values to the motors.
+
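+The same hand calibration can also be launched from the Python API. A minimal sketch (the port and id are the example values used above):
+
+```python
+from lerobot.robots.hope_jr import HopeJrHand, HopeJrHandConfig
+
+config = HopeJrHandConfig(
+ port="/dev/tty.usbmodem58760432281",
+ id="blue",
+ side="right",
+)
+
+hand = HopeJrHand(config)
+hand.connect(calibrate=False)
+hand.calibrate() # opens the range-finder GUI per finger, then writes the calibration file
+hand.disconnect()
+```
+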
+### 1.2 Calibrate Teleoperator Glove
+
+```bash
+python -m lerobot.calibrate \
+ --teleop.type=homunculus_glove \
+ --teleop.port=/dev/tty.usbmodem11201 \
+ --teleop.id=red \
+ --teleop.side=right
+```
+
+Move each finger through its full range of motion, starting from the thumb.
+
+```
+Move thumb through its entire range of motion.
+Recording positions. Press ENTER to stop...
+
+-------------------------------------------
+NAME | MIN | POS | MAX
+thumb_cmc | 1790 | 1831 | 1853
+thumb_mcp | 1497 | 1514 | 1528
+thumb_pip | 1466 | 1496 | 1515
+thumb_dip | 1463 | 1484 | 1514
+```
+
+Continue with each finger:
+
+```
+Move middle through its entire range of motion.
+Recording positions. Press ENTER to stop...
+
+-------------------------------------------
+NAME | MIN | POS | MAX
+middle_mcp_abduction | 1598 | 1718 | 1820
+middle_mcp_flexion | 1512 | 1658 | 2136
+middle_dip | 1484 | 1500 | 1547
+```
+
+Once calibration is complete, the system will save the calibration to `/Users/your_username/.cache/huggingface/lerobot/calibration/teleoperators/homunculus_glove/red.json`.
+
+### 1.3 Calibrate Robot Arm
+
+```bash
+python -m lerobot.calibrate \
+ --robot.type=hope_jr_arm \
+ --robot.port=/dev/tty.usbserial-1110 \
+ --robot.id=white
+```
+
+This will open a calibration GUI where you can set the range limits for each motor. The arm motions are organized as follows:
+
+- **Shoulder**: pitch, yaw, and roll
+- **Elbow**: flex
+- **Wrist**: pitch, yaw, and roll
+
+
+
+
+
+Use the calibration interface to set the range boundaries for each joint. Move each joint through its full range of motion and adjust the minimum and maximum values accordingly. Once you have set the appropriate boundaries for all joints, save the calibration.
+
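+As with the hand, the arm calibration can also be launched from the Python API. A minimal sketch (the port and id are the example values used above):
+
+```python
+from lerobot.robots.hope_jr import HopeJrArm, HopeJrArmConfig
+
+config = HopeJrArmConfig(port="/dev/tty.usbserial-1110", id="white")
+
+arm = HopeJrArm(config)
+arm.connect(calibrate=False)
+arm.calibrate() # opens the range-finder GUI for the shoulder, elbow and wrist groups
+arm.disconnect()
+```
+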
+### 1.4 Calibrate Teleoperator Exoskeleton
+
+```bash
+python -m lerobot.calibrate \
+ --teleop.type=homunculus_arm \
+ --teleop.port=/dev/tty.usbmodem11201 \
+ --teleop.id=black
+```
+
+The exoskeleton allows one to control the robot arm. During calibration, you'll be prompted to move all joints through their full range of motion:
+
+```
+Move all joints through their entire range of motion.
+Recording positions. Press ENTER to stop...
+
+-------------------------------------------
+-------------------------------------------
+NAME | MIN | POS | MAX
+shoulder_pitch | 586 | 736 | 895
+shoulder_yaw | 1257 | 1374 | 1390
+shoulder_roll | 449 | 1034 | 2564
+elbow_flex | 3023 | 3117 | 3134
+wrist_roll | 3073 | 3096 | 3147
+wrist_yaw | 2143 | 2171 | 2185
+wrist_pitch | 1975 | 1993 | 2074
+Calibration saved to /Users/your_username/.cache/huggingface/lerobot/calibration/teleoperators/homunculus_arm/black.json
+```
+
+## Step 2: Teleoperation
+
+Due to global variable conflicts in the Feetech middleware, teleoperation for the arm and the hand must run in separate shell sessions:
+
+### Hand
+
+```bash
+python -m lerobot.teleoperate \
+ --robot.type=hope_jr_hand \
+ --robot.port=/dev/tty.usbmodem58760432281 \
+ --robot.id=blue \
+ --robot.side=right \
+ --teleop.type=homunculus_glove \
+ --teleop.port=/dev/tty.usbmodem11201 \
+ --teleop.id=red \
+ --teleop.side=right \
+ --display_data=true \
+ --fps=30
+```
+
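+If you just want to check that the hand responds without the glove, you can also drive it directly from the Python API. A minimal sketch (joint positions are normalized to the calibrated 0-100 range; the port and id are the example values used above):
+
+```python
+import time
+
+from lerobot.robots.hope_jr import HopeJrHand, HopeJrHandConfig
+
+hand = HopeJrHand(HopeJrHandConfig(port="/dev/tty.usbmodem58760432281", id="blue", side="right"))
+hand.connect()
+
+# Sweep the index PIP/DIP joint between its calibrated limits a few times.
+for _ in range(3):
+ hand.send_action({"index_pip_dip.pos": 100.0})
+ time.sleep(1.0)
+ hand.send_action({"index_pip_dip.pos": 0.0})
+ time.sleep(1.0)
+
+hand.disconnect()
+```
+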
+### Arm
+
+```bash
+python -m lerobot.teleoperate \
+ --robot.type=hope_jr_arm \
+ --robot.port=/dev/tty.usbserial-1110 \
+ --robot.id=white \
+ --teleop.type=homunculus_arm \
+ --teleop.port=/dev/tty.usbmodem11201 \
+ --teleop.id=black \
+ --display_data=true \
+ --fps=30
+```
+
+## Step 3: Record, Replay, Train
+
+Recording, replaying, and training with HopeJR are still experimental.
+
+### Record
+
+This step records a dataset; an example recording can be seen [here](https://huggingface.co/datasets/nepyope/hand_record_test_with_video_data/settings).
+
+```bash
+python -m lerobot.record \
+ --robot.type=hope_jr_hand \
+ --robot.port=/dev/tty.usbmodem58760432281 \
+ --robot.id=right \
+ --robot.side=right \
+ --robot.cameras='{"main": {"type": "opencv", "index_or_path": 0, "width": 640, "height": 480, "fps": 30}}' \
+ --teleop.type=homunculus_glove \
+ --teleop.port=/dev/tty.usbmodem1201 \
+ --teleop.id=right \
+ --teleop.side=right \
+ --dataset.repo_id=nepyope/hand_record_test_with_video_data \
+ --dataset.single_task="Hand recording test with video data" \
+ --dataset.num_episodes=1 \
+ --dataset.episode_time_s=5 \
+ --dataset.push_to_hub=true \
+ --dataset.private=true \
+ --display_data=true
+```
+
+### Replay
+
+```bash
+python -m lerobot.replay \
+ --robot.type=hope_jr_hand \
+ --robot.port=/dev/tty.usbmodem58760432281 \
+ --robot.id=right \
+ --robot.side=right \
+ --dataset.repo_id=nepyope/hand_record_test_with_camera \
+ --dataset.episode=0
+```
+
+### Train
+
+```bash
+python -m lerobot.scripts.train \
+ --dataset.repo_id=nepyope/hand_record_test_with_video_data \
+ --policy.type=act \
+ --output_dir=outputs/train/hopejr_hand \
+ --job_name=hopejr \
+ --policy.device=mps \
+ --wandb.enable=true \
+ --policy.repo_id=nepyope/hand_test_policy
+```
+
+### Evaluate
+
+An example training run for this policy can be viewed [here](https://wandb.ai/tino/lerobot/runs/rp0k8zvw?nw=nwusertino).
+
+```bash
+python -m lerobot.record \
+ --robot.type=hope_jr_hand \
+ --robot.port=/dev/tty.usbmodem58760432281 \
+ --robot.id=right \
+ --robot.side=right \
+ --robot.cameras='{"main": {"type": "opencv", "index_or_path": 0, "width": 640, "height": 480, "fps": 30}}' \
+ --display_data=false \
+ --dataset.repo_id=nepyope/eval_hopejr \
+ --dataset.single_task="Evaluate hopejr hand policy" \
+ --dataset.num_episodes=10 \
+ --policy.path=outputs/train/hopejr_hand/checkpoints/last/pretrained_model
+```
diff --git a/src/lerobot/robots/hope_jr/hope_jr_arm.py b/src/lerobot/robots/hope_jr/hope_jr_arm.py
new file mode 100644
index 0000000000..0e3a615a91
--- /dev/null
+++ b/src/lerobot/robots/hope_jr/hope_jr_arm.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from typing import Any
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorNormMode
+from lerobot.motors.calibration_gui import RangeFinderGUI
+from lerobot.motors.feetech import (
+ FeetechMotorsBus,
+)
+
+from ..robot import Robot
+from ..utils import ensure_safe_goal_position
+from .config_hope_jr import HopeJrArmConfig
+
+logger = logging.getLogger(__name__)
+
+
+class HopeJrArm(Robot):
+ config_class = HopeJrArmConfig
+ name = "hope_jr_arm"
+
+ def __init__(self, config: HopeJrArmConfig):
+ super().__init__(config)
+ self.config = config
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ "shoulder_pitch": Motor(1, "sm8512bl", MotorNormMode.RANGE_M100_100),
+ "shoulder_yaw": Motor(2, "sts3250", MotorNormMode.RANGE_M100_100),
+ "shoulder_roll": Motor(3, "sts3250", MotorNormMode.RANGE_M100_100),
+ "elbow_flex": Motor(4, "sts3250", MotorNormMode.RANGE_M100_100),
+ "wrist_roll": Motor(5, "sts3250", MotorNormMode.RANGE_M100_100),
+ "wrist_yaw": Motor(6, "sts3250", MotorNormMode.RANGE_M100_100),
+ "wrist_pitch": Motor(7, "sts3250", MotorNormMode.RANGE_M100_100),
+ },
+ calibration=self.calibration,
+ )
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ # HACK: shoulder_pitch uses a different motor model (sm8512bl) than the other joints, so it is read separately in get_observation()
+ self.shoulder_pitch = "shoulder_pitch"
+ self.other_motors = [m for m in self.bus.motors if m != "shoulder_pitch"]
+
+ @property
+ def _motors_ft(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._motors_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._motors_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+
+ def connect(self, calibrate: bool = True) -> None:
+ """
+ We assume that at connection time, the arm is in a rest position,
+ and torque can be safely disabled to run calibration.
+ """
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect(handshake=False)
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ # Connect the cameras
+ for cam in self.cameras.values():
+ cam.connect()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self, limb_name: str | None = None) -> None:
+ groups = {
+ "all": list(self.bus.motors.keys()),
+ "shoulder": ["shoulder_pitch", "shoulder_yaw", "shoulder_roll"],
+ "elbow": ["elbow_flex"],
+ "wrist": ["wrist_roll", "wrist_yaw", "wrist_pitch"],
+ }
+
+ self.calibration = RangeFinderGUI(self.bus, groups).run()
+ self._save_calibration()
+ print("Calibration saved to", self.calibration_fpath)
+
+ def configure(self) -> None:
+ with self.bus.torque_disabled():
+ self.bus.configure_motors(maximum_acceleration=30, acceleration=30)
+
+ def setup_motors(self) -> None:
+ # TODO: add docstring
+ for motor in reversed(self.bus.motors):
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ def get_observation(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ # Read arm position
+ start = time.perf_counter()
+ obs_dict = self.bus.sync_read("Present_Position", self.other_motors)
+ obs_dict[self.shoulder_pitch] = self.bus.read("Present_Position", self.shoulder_pitch)
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
+
+ # Cap goal position when too far away from present position.
+ # /!\ Slower fps expected due to reading from the follower.
+ if self.config.max_relative_target is not None:
+ present_pos = self.bus.sync_read("Present_Position")
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
+ goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
+
+ self.bus.sync_write("Goal_Position", goal_pos)
+ return {f"{motor}.pos": val for motor, val in goal_pos.items()}
+
+ def disconnect(self):
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
+ for cam in self.cameras.values():
+ cam.disconnect()
+
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/robots/hope_jr/hope_jr_hand.py b/src/lerobot/robots/hope_jr/hope_jr_hand.py
new file mode 100644
index 0000000000..8dc100e06e
--- /dev/null
+++ b/src/lerobot/robots/hope_jr/hope_jr_hand.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from typing import Any
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorNormMode
+from lerobot.motors.calibration_gui import RangeFinderGUI
+from lerobot.motors.feetech import (
+ FeetechMotorsBus,
+)
+
+from ..robot import Robot
+from .config_hope_jr import HopeJrHandConfig
+
+logger = logging.getLogger(__name__)
+
+RIGHT_HAND_INVERSIONS = [
+ "thumb_mcp",
+ "thumb_dip",
+ "index_ulnar_flexor",
+ "middle_ulnar_flexor",
+ "ring_ulnar_flexor",
+ "ring_pip_dip",
+ "pinky_ulnar_flexor",
+ "pinky_pip_dip",
+]
+
+LEFT_HAND_INVERSIONS = [
+ "thumb_cmc",
+ "thumb_mcp",
+ "thumb_dip",
+ "index_radial_flexor",
+ "index_pip_dip",
+ "middle_radial_flexor",
+ "middle_pip_dip",
+ "ring_radial_flexor",
+ "ring_pip_dip",
+ "pinky_radial_flexor",
+ # "pinky_pip_dip",
+]
+
+
+class HopeJrHand(Robot):
+ config_class = HopeJrHandConfig
+ name = "hope_jr_hand"
+
+ def __init__(self, config: HopeJrHandConfig):
+ super().__init__(config)
+ self.config = config
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ # Thumb
+ "thumb_cmc": Motor(1, "scs0009", MotorNormMode.RANGE_0_100),
+ "thumb_mcp": Motor(2, "scs0009", MotorNormMode.RANGE_0_100),
+ "thumb_pip": Motor(3, "scs0009", MotorNormMode.RANGE_0_100),
+ "thumb_dip": Motor(4, "scs0009", MotorNormMode.RANGE_0_100),
+ # Index
+ "index_radial_flexor": Motor(5, "scs0009", MotorNormMode.RANGE_0_100),
+ "index_ulnar_flexor": Motor(6, "scs0009", MotorNormMode.RANGE_0_100),
+ "index_pip_dip": Motor(7, "scs0009", MotorNormMode.RANGE_0_100),
+ # Middle
+ "middle_radial_flexor": Motor(8, "scs0009", MotorNormMode.RANGE_0_100),
+ "middle_ulnar_flexor": Motor(9, "scs0009", MotorNormMode.RANGE_0_100),
+ "middle_pip_dip": Motor(10, "scs0009", MotorNormMode.RANGE_0_100),
+ # Ring
+ "ring_radial_flexor": Motor(11, "scs0009", MotorNormMode.RANGE_0_100),
+ "ring_ulnar_flexor": Motor(12, "scs0009", MotorNormMode.RANGE_0_100),
+ "ring_pip_dip": Motor(13, "scs0009", MotorNormMode.RANGE_0_100),
+ # Pinky
+ "pinky_radial_flexor": Motor(14, "scs0009", MotorNormMode.RANGE_0_100),
+ "pinky_ulnar_flexor": Motor(15, "scs0009", MotorNormMode.RANGE_0_100),
+ "pinky_pip_dip": Motor(16, "scs0009", MotorNormMode.RANGE_0_100),
+ },
+ calibration=self.calibration,
+ protocol_version=1,
+ )
+ self.cameras = make_cameras_from_configs(config.cameras)
+ self.inverted_motors = RIGHT_HAND_INVERSIONS if config.side == "right" else LEFT_HAND_INVERSIONS
+
+ @property
+ def _motors_ft(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._motors_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._motors_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+
+ def connect(self, calibrate: bool = True) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ # Connect the cameras
+ for cam in self.cameras.values():
+ cam.connect()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ fingers = {}
+ for finger in ["thumb", "index", "middle", "ring", "pinky"]:
+ fingers[finger] = [motor for motor in self.bus.motors if motor.startswith(finger)]
+
+ self.calibration = RangeFinderGUI(self.bus, fingers).run()
+ for motor in self.inverted_motors:
+ self.calibration[motor].drive_mode = 1
+ self._save_calibration()
+ print("Calibration saved to", self.calibration_fpath)
+
+ def configure(self) -> None:
+ with self.bus.torque_disabled():
+ self.bus.configure_motors()
+
+ def setup_motors(self) -> None:
+ # TODO: add docstring
+ for motor in self.bus.motors:
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ def get_observation(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ obs_dict = {}
+
+ # Read hand position
+ start = time.perf_counter()
+ for motor in self.bus.motors:
+ obs_dict[f"{motor}.pos"] = self.bus.read("Present_Position", motor)
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
+ self.bus.sync_write("Goal_Position", goal_pos)
+ return action
+
+ def disconnect(self):
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
+ for cam in self.cameras.values():
+ cam.disconnect()
+
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/robots/koch_follower/__init__.py b/src/lerobot/robots/koch_follower/__init__.py
new file mode 100644
index 0000000000..6271c4e557
--- /dev/null
+++ b/src/lerobot/robots/koch_follower/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_koch_follower import KochFollowerConfig
+from .koch_follower import KochFollower
diff --git a/src/lerobot/robots/koch_follower/config_koch_follower.py b/src/lerobot/robots/koch_follower/config_koch_follower.py
new file mode 100644
index 0000000000..a7c9249ae9
--- /dev/null
+++ b/src/lerobot/robots/koch_follower/config_koch_follower.py
@@ -0,0 +1,39 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+
+from ..config import RobotConfig
+
+
+@RobotConfig.register_subclass("koch_follower")
+@dataclass
+class KochFollowerConfig(RobotConfig):
+ # Port to connect to the arm
+ port: str
+
+ disable_torque_on_disconnect: bool = True
+
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+ # the number of motors in your follower arms.
+ max_relative_target: int | None = None
+
+ # cameras
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
+
+ # Set to `True` for backward compatibility with previous policies/dataset
+ use_degrees: bool = False
diff --git a/src/lerobot/robots/koch_follower/koch.mdx b/src/lerobot/robots/koch_follower/koch.mdx
new file mode 100644
index 0000000000..d0b991e748
--- /dev/null
+++ b/src/lerobot/robots/koch_follower/koch.mdx
@@ -0,0 +1,283 @@
+# Koch v1.1
+
+In the steps below, we explain how to assemble the Koch v1.1 robot.
+
+## Order and assemble the parts
+
+Follow the sourcing and assembling instructions provided in this [README](https://github.com/jess-moss/koch-v1-1). This will guide you through setting up both the follower and leader arms, as shown in the image below.
+
+For a visual walkthrough of the assembly process, you can refer to [this video tutorial](https://youtu.be/8nQIg9BwwTk).
+
+> [!WARNING]
+> Since the production of this video, we simplified the configuration phase. Because of this, two things differ from the instructions in that video:
+>
+> - Don't plug in all the motor cables right away; wait until you are instructed to do so in [Configure the motors](#configure-the-motors).
+> - Don't screw the controller board (PCB) to the base right away; wait until you are instructed to do so in [Configure the motors](#configure-the-motors).
+
+## Install LeRobot 🤗
+
+To install LeRobot, follow our [Installation Guide](./installation)
+
+In addition to these instructions, you need to install the Dynamixel SDK:
+
+```bash
+pip install -e ".[dynamixel]"
+```
+
+## Configure the motors
+
+### 1. Find the USB ports associated with each arm
+
+To find the port for each bus servo adapter, run this script:
+
+```bash
+python -m lerobot.find_port
+```
+
+
+
+
+Example output:
+
+```
+Finding all available ports for the MotorBus.
+['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
+Remove the USB cable from your MotorsBus and press Enter when done.
+
+[...Disconnect corresponding leader or follower arm and press Enter...]
+
+The port of this MotorsBus is /dev/tty.usbmodem575E0032081
+Reconnect the USB cable.
+```
+
+Here, the detected port is `/dev/tty.usbmodem575E0032081`, corresponding to your leader or follower arm.
+
+
+
+
+On Linux, you might need to give access to the USB ports by running:
+
+```bash
+sudo chmod 666 /dev/ttyACM0
+sudo chmod 666 /dev/ttyACM1
+```
+
+Example output:
+
+```
+Finding all available ports for the MotorBus.
+['/dev/ttyACM0', '/dev/ttyACM1']
+Remove the usb cable from your MotorsBus and press Enter when done.
+
+[...Disconnect corresponding leader or follower arm and press Enter...]
+
+The port of this MotorsBus is /dev/ttyACM1
+Reconnect the USB cable.
+```
+
+Here, the detected port is `/dev/ttyACM1`, corresponding to your leader or follower arm.
+
+
+
+
+### 2. Set the motors ids and baudrates
+
+Each motor is identified by a unique id on the bus. When brand new, motors usually come with a default id of `1`. For the communication to work properly between the motors and the controller, we first need to set a unique, different id to each motor. Additionally, the speed at which data is transmitted on the bus is determined by the baudrate. In order to talk to each other, the controller and all the motors need to be configured with the same baudrate.
+
+To that end, we first need to connect to each motor individually with the controller in order to set these. Since we will write these parameters in the non-volatile section of the motors' internal memory (EEPROM), we'll only need to do this once.
+
+If you are repurposing motors from another robot, you will probably also need to perform this step, as the ids and baudrate likely won't match.
+
+#### Follower
+
+Connect the USB cable from your computer and the 5V power supply to the follower arm's controller board. Then, run the following command or the API example with the port you got from the previous step. You'll also need to give your follower arm a name with the `id` parameter.
+
+For a visual reference on how to set the motor ids please refer to [this video](https://huggingface.co/docs/lerobot/en/so101#setup-motors-video) where we follow the process for the SO101 arm.
+
+
+
+
+```bash
+python -m lerobot.setup_motors \
+ --robot.type=koch_follower \
+ --robot.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
+```
+
+
+
+
+
+```python
+from lerobot.robots.koch_follower import KochFollower, KochFollowerConfig
+
+config = KochFollowerConfig(
+ port="/dev/tty.usbmodem575E0031751",
+ id="my_awesome_follower_arm",
+)
+follower = KochFollower(config)
+follower.setup_motors()
+```
+
+
+
+
+
+You should see the following instruction.
+
+```
+Connect the controller board to the 'gripper' motor only and press enter.
+```
+
+As instructed, plug in the gripper's motor. Make sure it's the only motor connected to the board and that the motor itself is not yet daisy-chained to any other motor. When you press `[Enter]`, the script will automatically set the id and baudrate for that motor.
+
+
+Troubleshooting
+
+If you get an error at that point, check your cables and make sure they are plugged in properly:
+
+- Power supply
+- USB cable between your computer and the controller board
+- The 3-pin cable from the controller board to the motor
+
+If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
+
+
+
+You should then see the following message:
+
+```
+'gripper' motor id set to 6
+```
+
+Followed by the next instruction:
+
+```
+Connect the controller board to the 'wrist_roll' motor only and press enter.
+```
+
+You can disconnect the 3-pin cable from the controller board, but leave it connected to the gripper motor on the other end, as it will already be in the right place. Now, plug another 3-pin cable into the wrist roll motor and connect it to the controller board. As with the previous motor, make sure it is the only motor connected to the board and that the motor itself isn't connected to any other one.
+
+Repeat the operation for each motor as instructed.
+
+> [!TIP]
+> Check your cabling at each step before pressing Enter. For instance, the power supply cable might disconnect as you manipulate the board.
+
+When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm.
+
+#### Leader
+
+Do the same steps for the leader arm but modify the command or script accordingly.
+
+
+
+
+```bash
+python -m lerobot.setup_motors \
+ --teleop.type=koch_leader \
+ --teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
+```
+
+
+
+
+
+```python
+from lerobot.teleoperators.koch_leader import KochLeader, KochLeaderConfig
+
+config = KochLeaderConfig(
+ port="/dev/tty.usbmodem575E0031751",
+ id="my_awesome_leader_arm",
+)
+leader = KochLeader(config)
+leader.setup_motors()
+```
+
+
+
+
+
+## Calibrate
+
+Next, you'll need to calibrate your robot to ensure that the leader and follower arms have the same position values when they are in the same physical position.
+The calibration process is very important because it allows a neural network trained on one robot to work on another.
+
+#### Follower
+
+Run the following command or API example to calibrate the follower arm:
+
+
+
+
+```bash
+python -m lerobot.calibrate \
+ --robot.type=koch_follower \
+ --robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
+ --robot.id=my_awesome_follower_arm # <- Give the robot a unique name
+```
+
+
+
+
+
+```python
+from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower
+
+config = KochFollowerConfig(
+ port="/dev/tty.usbmodem585A0076891",
+ id="my_awesome_follower_arm",
+)
+
+follower = KochFollower(config)
+follower.connect(calibrate=False)
+follower.calibrate()
+follower.disconnect()
+```
+
+
+
+
+
+We unified the calibration method for most robots, so the calibration steps for this Koch arm are the same as for the SO100 and SO101. First, move the robot to the position where each joint is in the middle of its range, then press `Enter`. Second, move all joints through their full range of motion. A video of this process for the SO101, for reference, can be found [here](https://huggingface.co/docs/lerobot/en/so101#calibration-video).
+
+#### Leader
+
+Do the same steps to calibrate the leader arm, run the following command or API example:
+
+
+
+
+```bash
+python -m lerobot.calibrate \
+ --teleop.type=koch_leader \
+ --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
+ --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
+```
+
+
+
+
+
+```python
+from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader
+
+config = KochLeaderConfig(
+ port="/dev/tty.usbmodem575E0031751",
+ id="my_awesome_leader_arm",
+)
+
+leader = KochLeader(config)
+leader.connect(calibrate=False)
+leader.calibrate()
+leader.disconnect()
+```
+
+
+
+
+
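+With both arms calibrated, you can sanity-check the setup with a minimal teleoperation loop from the Python API. This is only a sketch: it assumes the leader exposes `get_action()` returning the same `*.pos` keys that the follower's `send_action()` expects, as the other LeRobot teleoperators do.
+
+```python
+import time
+
+from lerobot.robots.koch_follower import KochFollower, KochFollowerConfig
+from lerobot.teleoperators.koch_leader import KochLeader, KochLeaderConfig
+
+follower = KochFollower(KochFollowerConfig(port="/dev/tty.usbmodem585A0076891", id="my_awesome_follower_arm"))
+leader = KochLeader(KochLeaderConfig(port="/dev/tty.usbmodem575E0031751", id="my_awesome_leader_arm"))
+
+follower.connect()
+leader.connect()
+
+# Stream leader positions to the follower at roughly 30 fps for ~10 seconds.
+for _ in range(300):
+ action = leader.get_action() # assumed to return {"shoulder_pan.pos": ..., ...}
+ follower.send_action(action)
+ time.sleep(1 / 30)
+
+follower.disconnect()
+leader.disconnect()
+```
+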
+Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)
+
+> [!TIP]
+> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
diff --git a/src/lerobot/robots/koch_follower/koch_follower.py b/src/lerobot/robots/koch_follower/koch_follower.py
new file mode 100644
index 0000000000..1cfc6cf08b
--- /dev/null
+++ b/src/lerobot/robots/koch_follower/koch_follower.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from typing import Any
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.dynamixel import (
+ DynamixelMotorsBus,
+ OperatingMode,
+)
+
+from ..robot import Robot
+from ..utils import ensure_safe_goal_position
+from .config_koch_follower import KochFollowerConfig
+
+logger = logging.getLogger(__name__)
+
+
+class KochFollower(Robot):
+ """
+ - [Koch v1.0](https://github.com/AlexanderKoch-Koch/low_cost_robot), with and without the wrist-to-elbow
+ expansion, developed by Alexander Koch from [Tau Robotics](https://tau-robotics.com)
+ - [Koch v1.1](https://github.com/jess-moss/koch-v1-1) developed by Jess Moss
+ """
+
+ config_class = KochFollowerConfig
+ name = "koch_follower"
+
+ def __init__(self, config: KochFollowerConfig):
+ super().__init__(config)
+ self.config = config
+ norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
+ self.bus = DynamixelMotorsBus(
+ port=self.config.port,
+ motors={
+ "shoulder_pan": Motor(1, "xl430-w250", norm_mode_body),
+ "shoulder_lift": Motor(2, "xl430-w250", norm_mode_body),
+ "elbow_flex": Motor(3, "xl330-m288", norm_mode_body),
+ "wrist_flex": Motor(4, "xl330-m288", norm_mode_body),
+ "wrist_roll": Motor(5, "xl330-m288", norm_mode_body),
+ "gripper": Motor(6, "xl330-m288", MotorNormMode.RANGE_0_100),
+ },
+ calibration=self.calibration,
+ )
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ @property
+ def _motors_ft(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._motors_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._motors_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+
+ def connect(self, calibrate: bool = True) -> None:
+ """
+ We assume that at connection time, the arm is in a rest position,
+ and torque can be safely disabled to run calibration.
+ """
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ for cam in self.cameras.values():
+ cam.connect()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ logger.info(f"\nRunning calibration of {self}")
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+ input(f"Move {self} to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ full_turn_motors = ["shoulder_pan", "wrist_roll"]
+ unknown_range_motors = [motor for motor in self.bus.motors if motor not in full_turn_motors]
+ print(
+ f"Move all joints except {full_turn_motors} sequentially through their entire "
+ "ranges of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+ for motor in full_turn_motors:
+ range_mins[motor] = 0
+ range_maxes[motor] = 4095
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=0,
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ logger.info(f"Calibration saved to {self.calibration_fpath}")
+
+ def configure(self) -> None:
+ with self.bus.torque_disabled():
+ self.bus.configure_motors()
+ # Use 'extended position mode' for all motors except the gripper, because in joint mode the servos
+ # can't rotate more than 360 degrees (from 0 to 4095). Some mistakes can happen while assembling
+ # the arm, and you could end up with a servo at position 0 or 4095 at a crucial point.
+ for motor in self.bus.motors:
+ if motor != "gripper":
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+ # Use 'position control current based' for the gripper so it is limited by its current limit. For
+ # the follower gripper, this means it can grasp an object without forcing too much even though its
+ # goal position is a complete grasp (both gripper fingers are commanded to close and touch).
+ # For the leader gripper, it means we can use it as a physical trigger, since we can push it with
+ # our finger to make it move, and it will move back to its original target position when we
+ # release the force.
+ self.bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)
+
+ # Set better PID values to close the gap between recorded states and actions
+ # TODO(rcadene): Implement an automatic procedure to set optimal PID values for each motor
+ self.bus.write("Position_P_Gain", "elbow_flex", 1500)
+ self.bus.write("Position_I_Gain", "elbow_flex", 0)
+ self.bus.write("Position_D_Gain", "elbow_flex", 600)
+
+ def setup_motors(self) -> None:
+ for motor in reversed(self.bus.motors):
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ def get_observation(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ # Read arm position
+ start = time.perf_counter()
+ obs_dict = self.bus.sync_read("Present_Position")
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def send_action(self, action: dict[str, float]) -> dict[str, float]:
+ """Command arm to move to a target joint configuration.
+
+ The relative action magnitude may be clipped depending on the configuration parameter
+ `max_relative_target`. In this case, the action sent differs from the original action.
+ Thus, this function always returns the action actually sent.
+
+ Args:
+ action (dict[str, float]): The goal positions for the motors.
+
+ Returns:
+ dict[str, float]: The action sent to the motors, potentially clipped.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
+
+ # Cap goal position when too far away from present position.
+ # /!\ Slower fps expected due to reading from the follower.
+ if self.config.max_relative_target is not None:
+ present_pos = self.bus.sync_read("Present_Position")
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
+ goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
+
+ # Send goal position to the arm
+ self.bus.sync_write("Goal_Position", goal_pos)
+ return {f"{motor}.pos": val for motor, val in goal_pos.items()}
+
+ def disconnect(self):
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
+ for cam in self.cameras.values():
+ cam.disconnect()
+
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/robots/lekiwi/__init__.py b/src/lerobot/robots/lekiwi/__init__.py
new file mode 100644
index 0000000000..ada2ff3684
--- /dev/null
+++ b/src/lerobot/robots/lekiwi/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_lekiwi import LeKiwiClientConfig, LeKiwiConfig
+from .lekiwi import LeKiwi
+from .lekiwi_client import LeKiwiClient
diff --git a/src/lerobot/robots/lekiwi/config_lekiwi.py b/src/lerobot/robots/lekiwi/config_lekiwi.py
new file mode 100644
index 0000000000..f0f8c24b31
--- /dev/null
+++ b/src/lerobot/robots/lekiwi/config_lekiwi.py
@@ -0,0 +1,96 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras.configs import CameraConfig, Cv2Rotation
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+
+from ..config import RobotConfig
+
+
+def lekiwi_cameras_config() -> dict[str, CameraConfig]:
+ return {
+ "front": OpenCVCameraConfig(
+ index_or_path="/dev/video0", fps=30, width=640, height=480, rotation=Cv2Rotation.ROTATE_180
+ ),
+ "wrist": OpenCVCameraConfig(
+ index_or_path="/dev/video2", fps=30, width=480, height=640, rotation=Cv2Rotation.ROTATE_90
+ ),
+ }
+
+
+@RobotConfig.register_subclass("lekiwi")
+@dataclass
+class LeKiwiConfig(RobotConfig):
+ port: str = "/dev/ttyACM0" # port to connect to the bus
+
+ disable_torque_on_disconnect: bool = True
+
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+ # the number of motors in your follower arms.
+ max_relative_target: int | None = None
+
+ cameras: dict[str, CameraConfig] = field(default_factory=lekiwi_cameras_config)
+
+ # Set to `True` for backward compatibility with previous policies/dataset
+ use_degrees: bool = False
+
+
+@dataclass
+class LeKiwiHostConfig:
+ # Network Configuration
+ port_zmq_cmd: int = 5555
+ port_zmq_observations: int = 5556
+
+ # Duration of the application
+ connection_time_s: int = 30
+
+ # Watchdog: stop the robot if no command is received for over 0.5 seconds.
+ watchdog_timeout_ms: int = 500
+
+ # If robot jitters decrease the frequency and monitor cpu load with `top` in cmd
+ max_loop_freq_hz: int = 30
+
+
+@RobotConfig.register_subclass("lekiwi_client")
+@dataclass
+class LeKiwiClientConfig(RobotConfig):
+ # Network Configuration
+ remote_ip: str
+ port_zmq_cmd: int = 5555
+ port_zmq_observations: int = 5556
+
+ teleop_keys: dict[str, str] = field(
+ default_factory=lambda: {
+ # Movement
+ "forward": "w",
+ "backward": "s",
+ "left": "a",
+ "right": "d",
+ "rotate_left": "z",
+ "rotate_right": "x",
+ # Speed control
+ "speed_up": "r",
+ "speed_down": "f",
+ # quit teleop
+ "quit": "q",
+ }
+ )
+
+ cameras: dict[str, CameraConfig] = field(default_factory=lekiwi_cameras_config)
+
+ polling_timeout_ms: int = 15
+ connect_timeout_s: int = 5
diff --git a/src/lerobot/robots/lekiwi/lekiwi.mdx b/src/lerobot/robots/lekiwi/lekiwi.mdx
new file mode 100644
index 0000000000..bb70fd26b7
--- /dev/null
+++ b/src/lerobot/robots/lekiwi/lekiwi.mdx
@@ -0,0 +1,337 @@
+# LeKiwi
+
+In the steps below, we explain how to assemble the LeKiwi mobile robot.
+
+## Source the parts
+
+Follow this [README](https://github.com/SIGRobotics-UIUC/LeKiwi). It contains the bill of materials with links to source the parts, instructions to 3D print the parts, and advice if it's your first time printing or if you don't own a 3D printer.
+
+### Wired version
+
+If you have the **wired** LeKiwi version, you can skip the installation of the Raspberry Pi and setting up SSH. You can also run all commands directly on your PC for both the LeKiwi scripts and the leader arm scripts for teleoperating.
+
+## Install software on Pi
+
+Now we have to set up the remote PC that runs on the LeKiwi robot. This is normally a Raspberry Pi, but it can be any PC that runs on 5V and has enough USB ports (2 or more) for the cameras and the motor control board.
+
+### Install OS
+
+For setting up the Raspberry Pi and its SD-card see: [Setup PI](https://www.raspberrypi.com/documentation/computers/getting-started.html). Here is explained how to download the [Imager](https://www.raspberrypi.com/software/) to install Raspberry Pi OS or Ubuntu.
+
+### Setup SSH
+
+After setting up your Pi, you should enable and set up [SSH](https://www.raspberrypi.com/news/coding-on-raspberry-pi-remotely-with-visual-studio-code/) (Secure Shell Protocol) so you can log in to the Pi from your laptop without requiring a screen, keyboard, and mouse on the Pi. A great tutorial on how to do this can be found [here](https://www.raspberrypi.com/documentation/computers/remote-access.html#ssh). Logging into your Pi can be done in your Command Prompt (cmd) or, if you use VSCode, via [this](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh) extension.
+
+### Install LeRobot on Pi 🤗
+
+On your Raspberry Pi install LeRobot using our [Installation Guide](./installation)
+
+In addition to these instructions, you need to install the Feetech SDK & ZeroMQ on your Pi:
+
+```bash
+pip install -e ".[lekiwi]"
+```
+
+## Install LeRobot locally
+
+If you have already installed LeRobot on your laptop/PC, you can skip this step; otherwise, follow along as we repeat the same steps we did on the Pi.
+
+Follow our [Installation Guide](./installation)
+
+In addition to these instructions, you need to install the Feetech SDK & ZeroMQ on your laptop/pc:
+
+```bash
+pip install -e ".[lekiwi]"
+```
+
+Great :hugs:! You are now done installing LeRobot, and we can begin assembling the SO100/SO101 arms and the mobile base :robot:.
+Whenever you want to use LeRobot, go to the `~/lerobot` folder where we installed it and run the commands from there.
+
+# Step-by-Step Assembly Instructions
+
+First, we will assemble the two SO100/SO101 arms. One to attach to the mobile base and one for teleoperation. Then we will assemble the mobile base. The instructions for assembling can be found on these two pages:
+
+- [Assemble SO101](./so101#step-by-step-assembly-instructions)
+- [Assemble LeKiwi](https://github.com/SIGRobotics-UIUC/LeKiwi/blob/main/Assembly.md)
+
+### Find the USB ports associated with motor board
+
+To find the port for each bus servo adapter, run this script:
+
+```bash
+python -m lerobot.find_port
+```
+
+
+
+
+Example output:
+
+```
+Finding all available ports for the MotorBus.
+['/dev/tty.usbmodem575E0032081']
+Remove the USB cable from your MotorsBus and press Enter when done.
+
+[...Disconnect corresponding leader or follower arm and press Enter...]
+
+The port of this MotorsBus is /dev/tty.usbmodem575E0032081
+Reconnect the USB cable.
+```
+
+Here, the detected port is `/dev/tty.usbmodem575E0032081`, corresponding to your board.
+
+
+
+
+On Linux, you might need to give access to the USB ports by running:
+
+```bash
+sudo chmod 666 /dev/ttyACM0
+sudo chmod 666 /dev/ttyACM1
+```
+
+Example output:
+
+```
+Finding all available ports for the MotorBus.
+['/dev/ttyACM0']
+Remove the usb cable from your MotorsBus and press Enter when done.
+
+[...Disconnect corresponding leader or follower arm and press Enter...]
+
+The port of this MotorsBus is /dev/ttyACM0
+Reconnect the USB cable.
+```
+
+Here, the detected port is `/dev/ttyACM0`, corresponding to your board.
+
+
+
+
+### Configure motors
+
+The instructions for configuring the motors can be found in the SO101 [docs](./so101#configure-the-motors). Besides the ids for the arm motors, we also need to set the motor ids for the mobile base, and these need to be in a specific order to work. Below is an image of the motor ids and mounting positions for the mobile base. Note that we only use one motor control board on LeKiwi, which means the motor ids for the wheels are 7, 8 and 9.
+
+You can run this command to setup motors for LeKiwi. It will first setup the motors for arm (id 6..1) and then setup motors for wheels (9,8,7)
+
+```bash
+python -m lerobot.setup_motors \
+ --robot.type=lekiwi \
+ --robot.port=/dev/tty.usbmodem58760431551 # <- paste here the port found at previous step
+```
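+
+If you prefer the Python API, the same motor setup can be driven directly from the `LeKiwi` class added in this PR. This is a minimal sketch: the import paths follow the files in this change, and the port/id values are placeholders to adapt to your setup.
+
+```python
+from lerobot.robots.lekiwi.config_lekiwi import LeKiwiConfig
+from lerobot.robots.lekiwi.lekiwi import LeKiwi
+
+# Placeholder values: use the port found with `lerobot.find_port` and your own robot name.
+config = LeKiwiConfig(port="/dev/tty.usbmodem58760431551", id="my_awesome_kiwi")
+robot = LeKiwi(config)
+robot.setup_motors()  # walks through the arm motors (6 down to 1), then the wheel motors (9, 8, 7)
+```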
+
+
+
+### Troubleshoot communication
+
+If you are having trouble connecting to the Mobile SO100, follow these steps to diagnose and resolve the issue.
+
+#### 1. Verify IP Address Configuration
+
+Make sure that the correct IP for the Pi is used in the commands or in your code. To check the Raspberry Pi's IP address, run (on the Pi command line):
+
+```bash
+hostname -I
+```
+
+#### 2. Check if Pi is reachable from laptop/pc
+
+Try pinging the Raspberry Pi from your laptop:
+
+```bash
+ping <your_pi_ip_address>
+```
+
+If the ping fails:
+
+- Ensure the Pi is powered on and connected to the same network.
+- Check if SSH is enabled on the Pi.
+
+#### 3. Try SSH connection
+
+If you can't SSH into the Pi, it might not be properly connected. Use:
+
+```bash
+ssh <username>@<your_pi_ip_address>
+```
+
+If you get a connection error:
+
+- Ensure SSH is enabled on the Pi by running:
+ ```bash
+ sudo raspi-config
+ ```
+ Then navigate to: **Interfacing Options -> SSH** and enable it.
+
+### Calibration
+
+Now we have to calibrate the leader arm and the follower arm. The wheel motors don't have to be calibrated.
+The calibration process is very important because it allows a neural network trained on one robot to work on another.
+
+### Calibrate follower arm (on mobile base)
+
+Make sure the arm is connected to the Raspberry Pi and run this script or API example (on the Raspberry Pi via SSH) to launch calibration of the follower arm:
+
+```bash
+python -m lerobot.calibrate \
+ --robot.type=lekiwi \
+ --robot.id=my_awesome_kiwi # <- Give the robot a unique name
+```
+
+We unified the calibration method for most robots, so the calibration steps for this SO100 arm are the same as those for the Koch and SO101. First, move the robot to the position where each joint is in the middle of its range, then press `Enter`. Second, move all joints through their full range of motion. A video of this same process for the SO101, for reference, can be found [here](https://huggingface.co/docs/lerobot/en/so101#calibration-video).
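+
+If you prefer the Python API, the equivalent calibration flow on the Pi looks roughly like this. This is a minimal sketch: it assumes the default `port` in `LeKiwiConfig` matches your motor-bus wiring, and the `id` is a placeholder.
+
+```python
+from lerobot.robots.lekiwi.config_lekiwi import LeKiwiConfig
+from lerobot.robots.lekiwi.lekiwi import LeKiwi
+
+config = LeKiwiConfig(id="my_awesome_kiwi")  # placeholder id, reuse the name from the command above
+robot = LeKiwi(config)
+robot.connect(calibrate=False)  # connect first, then trigger calibration explicitly
+robot.calibrate()
+robot.disconnect()
+```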
+
+### Wired version
+
+If you have the **wired** LeKiwi version, please run all commands on your laptop.
+
+### Calibrate leader arm
+
+Then, calibrate the leader arm (which is attached to the laptop/pc) by running the following command or API example on your laptop:
+
+
+
+
+```bash
+python -m lerobot.calibrate \
+ --teleop.type=so100_leader \
+    --teleop.port=/dev/tty.usbmodem58760431551 \
+    --teleop.id=my_awesome_leader_arm # <- Use the port of your leader arm and give the robot a unique name
+```
+
+
+
+
+
+```python
+from lerobot.teleoperators.so100_leader import SO100LeaderConfig, SO100Leader
+
+config = SO100LeaderConfig(
+ port="/dev/tty.usbmodem58760431551",
+ id="my_awesome_leader_arm",
+)
+
+leader = SO100Leader(config)
+leader.connect(calibrate=False)
+leader.calibrate()
+leader.disconnect()
+```
+
+
+
+
+
+## Teleoperate LeKiwi
+
+> [!TIP]
+> If you're using a Mac, you might need to give Terminal permission to access your keyboard for teleoperation. Go to System Preferences > Security & Privacy > Input Monitoring and check the box for Terminal.
+
+To teleoperate, SSH into your Raspberry Pi, run `conda activate lerobot`, and then run this command:
+
+```bash
+python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi
+```
+
+Then on your laptop, also run `conda activate lerobot` and run the API example. Make sure you set the correct `remote_ip` and `port` in `examples/lekiwi/teleoperate.py`.
+
+```bash
+python examples/lekiwi/teleoperate.py
+```
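+
+For reference, on the laptop side the teleoperation talks to the Pi through the `LeKiwiClient` class added in this PR. A minimal sketch of that connection, with placeholder IP and id values, looks like this:
+
+```python
+from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
+from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient
+
+# Placeholder values: use the Pi address from `hostname -I` and your robot's name.
+config = LeKiwiClientConfig(remote_ip="172.17.133.91", id="my_awesome_kiwi")
+robot = LeKiwiClient(config)
+robot.connect()                # raises DeviceNotConnectedError if the host on the Pi is unreachable
+obs = robot.get_observation()  # arm joint positions, base velocities, and one entry per camera
+robot.disconnect()
+```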
+
+You should see on your laptop something like this: `[INFO] Connected to remote robot at tcp://172.17.133.91:5555 and video stream at tcp://172.17.133.91:5556.` Now you can move the leader arm and use the keyboard (w, a, s, d) to drive forward, left, backward, and right, (z, x) to turn left or right, and (r, f) to increase or decrease the speed of the mobile robot. There are three speed modes; see the table below:
+
+| Speed Mode | Linear Speed (m/s) | Rotation Speed (deg/s) |
+| ---------- | ------------------ | ---------------------- |
+| Fast | 0.4 | 90 |
+| Medium | 0.25 | 60 |
+| Slow | 0.1 | 30 |
+
+| Key | Action |
+| --- | -------------- |
+| W | Move forward |
+| A | Move left |
+| S | Move backward |
+| D | Move right |
+| Z | Turn left |
+| X | Turn right |
+| R | Increase speed |
+| F | Decrease speed |
+
+> [!TIP]
+> If you use a different keyboard, you can change the keys for each command in the [`LeKiwiClientConfig`](../src/lerobot/robots/lekiwi/config_lekiwi.py).
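+
+For example, remapping the driving keys could look like this. This is a hedged sketch: only the keys read by `LeKiwiClient._from_keyboard_to_base_action` are shown, so check `LeKiwiClientConfig` for the complete default mapping before overriding it.
+
+```python
+from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
+
+# Placeholder remap of the base-driving keys; remote_ip and id are placeholders too.
+config = LeKiwiClientConfig(
+    remote_ip="172.17.133.91",
+    id="my_awesome_kiwi",
+    teleop_keys={
+        "forward": "i",
+        "backward": "k",
+        "left": "j",
+        "right": "l",
+        "rotate_left": "u",
+        "rotate_right": "o",
+        "speed_up": "r",
+        "speed_down": "f",
+    },
+)
+```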
+
+### Wired version
+
+If you have the **wired** LeKiwi version, please run all commands on your laptop.
+
+## Record a dataset
+
+Once you're familiar with teleoperation, you can record your first dataset.
+
+We use the Hugging Face Hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can log in via the CLI using a write-access token; this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).
+
+Add your token to the CLI by running this command:
+
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Then store your Hugging Face repository name in a variable:
+
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Now you can record a dataset. To record episodes and upload your dataset to the hub, execute this API example tailored for LeKiwi. Make sure to first adapt the `remote_ip`, `repo_id`, `port` and `task` in the script. If you would like to run the script for longer, you can increase `NB_CYCLES_CLIENT_CONNECTION`.
+
+```bash
+python examples/lekiwi/record.py
+```
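+
+For orientation, the kind of values you are adapting might look like this. These are hypothetical names and values that simply mirror the ones mentioned above; check the actual script for the exact variables.
+
+```python
+# Hypothetical values, shown only to illustrate what needs adapting in examples/lekiwi/record.py.
+NB_CYCLES_CLIENT_CONNECTION = 250      # increase to record for longer
+remote_ip = "172.17.133.91"            # the Pi's IP address (from `hostname -I`)
+port = 5555                            # ZMQ command port of the LeKiwi host
+repo_id = "<hf_username>/lekiwi_test"  # Hugging Face dataset repo to push to
+task = "Grasp the object and place it in the bin."
+```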
+
+#### Dataset upload
+
+Locally, your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}`. At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/so101_test), which you can obtain by running:
+
+```bash
+echo https://huggingface.co/datasets/${HF_USER}/so101_test
+```
+
+Your dataset will be automatically tagged with `LeRobot` so the community can find it easily, and you can also add custom tags (for example `tutorial`).
+
+You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).
+
+#### Tips for gathering data
+
+Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible in the cameras. A good rule of thumb is that you should be able to do the task yourself by only looking at the camera images.
+
+In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
+
+Avoid adding too much variation too quickly, as it may hinder your results.
+
+If you want to dive deeper into this important topic, you can check out the [blog post](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset) we wrote on what makes a good dataset.
+
+#### Troubleshooting:
+
+- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
+
+## Replay an episode
+
+To replay an episode, run the API example below. Make sure to change `remote_ip`, `port`, the LeRobotDataset id, and the episode index.
+
+```bash
+python examples/lekiwi/replay.py
+```
+
+Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following the training part of this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)
+
+## Evaluate your policy
+
+To evaluate your policy, run the `evaluate.py` API example. Make sure to change the `remote_ip`, `port`, and the model you want to load.
+
+```bash
+python examples/lekiwi/evaluate.py
+```
+
+> [!TIP]
+> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
diff --git a/src/lerobot/robots/lekiwi/lekiwi.py b/src/lerobot/robots/lekiwi/lekiwi.py
new file mode 100644
index 0000000000..ff1465d8be
--- /dev/null
+++ b/src/lerobot/robots/lekiwi/lekiwi.py
@@ -0,0 +1,411 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from itertools import chain
+from typing import Any
+
+import numpy as np
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.feetech import (
+ FeetechMotorsBus,
+ OperatingMode,
+)
+
+from ..robot import Robot
+from ..utils import ensure_safe_goal_position
+from .config_lekiwi import LeKiwiConfig
+
+logger = logging.getLogger(__name__)
+
+
+class LeKiwi(Robot):
+ """
+ The robot includes a three omniwheel mobile base and a remote follower arm.
+ The leader arm is connected locally (on the laptop) and its joint positions are recorded and then
+ forwarded to the remote follower arm (after applying a safety clamp).
+ In parallel, keyboard teleoperation is used to generate raw velocity commands for the wheels.
+ """
+
+ config_class = LeKiwiConfig
+ name = "lekiwi"
+
+ def __init__(self, config: LeKiwiConfig):
+ super().__init__(config)
+ self.config = config
+ norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ # arm
+ "arm_shoulder_pan": Motor(1, "sts3215", norm_mode_body),
+ "arm_shoulder_lift": Motor(2, "sts3215", norm_mode_body),
+ "arm_elbow_flex": Motor(3, "sts3215", norm_mode_body),
+ "arm_wrist_flex": Motor(4, "sts3215", norm_mode_body),
+ "arm_wrist_roll": Motor(5, "sts3215", norm_mode_body),
+ "arm_gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
+ # base
+ "base_left_wheel": Motor(7, "sts3215", MotorNormMode.RANGE_M100_100),
+ "base_back_wheel": Motor(8, "sts3215", MotorNormMode.RANGE_M100_100),
+ "base_right_wheel": Motor(9, "sts3215", MotorNormMode.RANGE_M100_100),
+ },
+ calibration=self.calibration,
+ )
+ self.arm_motors = [motor for motor in self.bus.motors if motor.startswith("arm")]
+ self.base_motors = [motor for motor in self.bus.motors if motor.startswith("base")]
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ @property
+ def _state_ft(self) -> dict[str, type]:
+ return dict.fromkeys(
+ (
+ "arm_shoulder_pan.pos",
+ "arm_shoulder_lift.pos",
+ "arm_elbow_flex.pos",
+ "arm_wrist_flex.pos",
+ "arm_wrist_roll.pos",
+ "arm_gripper.pos",
+ "x.vel",
+ "y.vel",
+ "theta.vel",
+ ),
+ float,
+ )
+
+ @property
+ def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._state_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._state_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+
+ def connect(self, calibrate: bool = True) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ for cam in self.cameras.values():
+ cam.connect()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ logger.info(f"\nRunning calibration of {self}")
+
+ motors = self.arm_motors + self.base_motors
+
+ self.bus.disable_torque(self.arm_motors)
+ for name in self.arm_motors:
+ self.bus.write("Operating_Mode", name, OperatingMode.POSITION.value)
+
+ input("Move robot to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings(self.arm_motors)
+
+ homing_offsets.update(dict.fromkeys(self.base_motors, 0))
+
+ full_turn_motor = [
+ motor for motor in motors if any(keyword in motor for keyword in ["wheel", "wrist"])
+ ]
+ unknown_range_motors = [motor for motor in motors if motor not in full_turn_motor]
+
+ print(
+ f"Move all arm joints except '{full_turn_motor}' sequentially through their "
+ "entire ranges of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+ for name in full_turn_motor:
+ range_mins[name] = 0
+ range_maxes[name] = 4095
+
+ self.calibration = {}
+ for name, motor in self.bus.motors.items():
+ self.calibration[name] = MotorCalibration(
+ id=motor.id,
+ drive_mode=0,
+ homing_offset=homing_offsets[name],
+ range_min=range_mins[name],
+ range_max=range_maxes[name],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ print("Calibration saved to", self.calibration_fpath)
+
+ def configure(self):
+ # Set-up arm actuators (position mode)
+ # We assume that at connection time, arm is in a rest position,
+ # and torque can be safely disabled to run calibration.
+ self.bus.disable_torque()
+ self.bus.configure_motors()
+ for name in self.arm_motors:
+ self.bus.write("Operating_Mode", name, OperatingMode.POSITION.value)
+ # Set P_Coefficient to lower value to avoid shakiness (Default is 32)
+ self.bus.write("P_Coefficient", name, 16)
+ # Set I_Coefficient and D_Coefficient to default value 0 and 32
+ self.bus.write("I_Coefficient", name, 0)
+ self.bus.write("D_Coefficient", name, 32)
+
+ for name in self.base_motors:
+ self.bus.write("Operating_Mode", name, OperatingMode.VELOCITY.value)
+
+ self.bus.enable_torque()
+
+ def setup_motors(self) -> None:
+ for motor in chain(reversed(self.arm_motors), reversed(self.base_motors)):
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ @staticmethod
+ def _degps_to_raw(degps: float) -> int:
+ steps_per_deg = 4096.0 / 360.0
+ speed_in_steps = degps * steps_per_deg
+ speed_int = int(round(speed_in_steps))
+ # Cap the value to fit within signed 16-bit range (-32768 to 32767)
+ if speed_int > 0x7FFF:
+ speed_int = 0x7FFF # 32767 -> maximum positive value
+ elif speed_int < -0x8000:
+ speed_int = -0x8000 # -32768 -> minimum negative value
+ return speed_int
+
+ @staticmethod
+ def _raw_to_degps(raw_speed: int) -> float:
+ steps_per_deg = 4096.0 / 360.0
+ magnitude = raw_speed
+ degps = magnitude / steps_per_deg
+ return degps
+
+ def _body_to_wheel_raw(
+ self,
+ x: float,
+ y: float,
+ theta: float,
+ wheel_radius: float = 0.05,
+ base_radius: float = 0.125,
+ max_raw: int = 3000,
+ ) -> dict:
+ """
+ Convert desired body-frame velocities into wheel raw commands.
+
+ Parameters:
+            x : Linear velocity in x (m/s).
+            y : Linear velocity in y (m/s).
+            theta : Rotational velocity (deg/s).
+ wheel_radius: Radius of each wheel (meters).
+ base_radius : Distance from the center of rotation to each wheel (meters).
+ max_raw : Maximum allowed raw command (ticks) per wheel.
+
+ Returns:
+ A dictionary with wheel raw commands:
+ {"base_left_wheel": value, "base_back_wheel": value, "base_right_wheel": value}.
+
+ Notes:
+          - Internally, the method converts theta to rad/s for the kinematics.
+ - The raw command is computed from the wheels angular speed in deg/s
+ using _degps_to_raw(). If any command exceeds max_raw, all commands
+ are scaled down proportionally.
+ """
+ # Convert rotational velocity from deg/s to rad/s.
+ theta_rad = theta * (np.pi / 180.0)
+ # Create the body velocity vector [x, y, theta_rad].
+ velocity_vector = np.array([x, y, theta_rad])
+
+ # Define the wheel mounting angles with a -90° offset.
+ angles = np.radians(np.array([240, 0, 120]) - 90)
+ # Build the kinematic matrix: each row maps body velocities to a wheel’s linear speed.
+ # The third column (base_radius) accounts for the effect of rotation.
+ m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])
+
+ # Compute each wheel’s linear speed (m/s) and then its angular speed (rad/s).
+ wheel_linear_speeds = m.dot(velocity_vector)
+ wheel_angular_speeds = wheel_linear_speeds / wheel_radius
+
+ # Convert wheel angular speeds from rad/s to deg/s.
+ wheel_degps = wheel_angular_speeds * (180.0 / np.pi)
+
+ # Scaling
+ steps_per_deg = 4096.0 / 360.0
+ raw_floats = [abs(degps) * steps_per_deg for degps in wheel_degps]
+ max_raw_computed = max(raw_floats)
+ if max_raw_computed > max_raw:
+ scale = max_raw / max_raw_computed
+ wheel_degps = wheel_degps * scale
+
+ # Convert each wheel’s angular speed (deg/s) to a raw integer.
+ wheel_raw = [self._degps_to_raw(deg) for deg in wheel_degps]
+
+ return {
+ "base_left_wheel": wheel_raw[0],
+ "base_back_wheel": wheel_raw[1],
+ "base_right_wheel": wheel_raw[2],
+ }
+
+ def _wheel_raw_to_body(
+ self,
+ left_wheel_speed,
+ back_wheel_speed,
+ right_wheel_speed,
+ wheel_radius: float = 0.05,
+ base_radius: float = 0.125,
+ ) -> dict[str, Any]:
+ """
+ Convert wheel raw command feedback back into body-frame velocities.
+
+        Parameters:
+            left_wheel_speed, back_wheel_speed, right_wheel_speed : Raw wheel speed feedback (ticks).
+            wheel_radius: Radius of each wheel (meters).
+            base_radius : Distance from the robot center to each wheel (meters).
+
+        Returns:
+            A dict with keys "x.vel", "y.vel" (m/s) and "theta.vel" (deg/s).
+ """
+
+ # Convert each raw command back to an angular speed in deg/s.
+ wheel_degps = np.array(
+ [
+ self._raw_to_degps(left_wheel_speed),
+ self._raw_to_degps(back_wheel_speed),
+ self._raw_to_degps(right_wheel_speed),
+ ]
+ )
+
+ # Convert from deg/s to rad/s.
+ wheel_radps = wheel_degps * (np.pi / 180.0)
+ # Compute each wheel’s linear speed (m/s) from its angular speed.
+ wheel_linear_speeds = wheel_radps * wheel_radius
+
+ # Define the wheel mounting angles with a -90° offset.
+ angles = np.radians(np.array([240, 0, 120]) - 90)
+ m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])
+
+ # Solve the inverse kinematics: body_velocity = M⁻¹ · wheel_linear_speeds.
+ m_inv = np.linalg.inv(m)
+ velocity_vector = m_inv.dot(wheel_linear_speeds)
+ x, y, theta_rad = velocity_vector
+ theta = theta_rad * (180.0 / np.pi)
+ return {
+ "x.vel": x,
+ "y.vel": y,
+ "theta.vel": theta,
+ } # m/s and deg/s
+
+ def get_observation(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ # Read actuators position for arm and vel for base
+ start = time.perf_counter()
+ arm_pos = self.bus.sync_read("Present_Position", self.arm_motors)
+ base_wheel_vel = self.bus.sync_read("Present_Velocity", self.base_motors)
+
+ base_vel = self._wheel_raw_to_body(
+ base_wheel_vel["base_left_wheel"],
+ base_wheel_vel["base_back_wheel"],
+ base_wheel_vel["base_right_wheel"],
+ )
+
+ arm_state = {f"{k}.pos": v for k, v in arm_pos.items()}
+
+ obs_dict = {**arm_state, **base_vel}
+
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ """Command lekiwi to move to a target joint configuration.
+
+ The relative action magnitude may be clipped depending on the configuration parameter
+ `max_relative_target`. In this case, the action sent differs from original action.
+ Thus, this function always returns the action actually sent.
+
+        Raises:
+            DeviceNotConnectedError: if robot is not connected.
+
+        Returns:
+            dict[str, Any]: the action sent to the motors, potentially clipped.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ arm_goal_pos = {k: v for k, v in action.items() if k.endswith(".pos")}
+ base_goal_vel = {k: v for k, v in action.items() if k.endswith(".vel")}
+
+ base_wheel_goal_vel = self._body_to_wheel_raw(
+ base_goal_vel["x.vel"], base_goal_vel["y.vel"], base_goal_vel["theta.vel"]
+ )
+
+ # Cap goal position when too far away from present position.
+ # /!\ Slower fps expected due to reading from the follower.
+ if self.config.max_relative_target is not None:
+ present_pos = self.bus.sync_read("Present_Position", self.arm_motors)
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in arm_goal_pos.items()}
+ arm_safe_goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
+ arm_goal_pos = arm_safe_goal_pos
+
+ # Send goal position to the actuators
+ arm_goal_pos_raw = {k.replace(".pos", ""): v for k, v in arm_goal_pos.items()}
+ self.bus.sync_write("Goal_Position", arm_goal_pos_raw)
+ self.bus.sync_write("Goal_Velocity", base_wheel_goal_vel)
+
+ return {**arm_goal_pos, **base_goal_vel}
+
+ def stop_base(self):
+ self.bus.sync_write("Goal_Velocity", dict.fromkeys(self.base_motors, 0), num_retry=5)
+ logger.info("Base motors stopped")
+
+ def disconnect(self):
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.stop_base()
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
+ for cam in self.cameras.values():
+ cam.disconnect()
+
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/robots/lekiwi/lekiwi_client.py b/src/lerobot/robots/lekiwi/lekiwi_client.py
new file mode 100644
index 0000000000..9a80014013
--- /dev/null
+++ b/src/lerobot/robots/lekiwi/lekiwi_client.py
@@ -0,0 +1,345 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(aliberts, Steven, Pepijn): use gRPC calls instead of zmq?
+
+import base64
+import json
+import logging
+from functools import cached_property
+from typing import Any
+
+import cv2
+import numpy as np
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+
+from ..robot import Robot
+from .config_lekiwi import LeKiwiClientConfig
+
+
+class LeKiwiClient(Robot):
+ config_class = LeKiwiClientConfig
+ name = "lekiwi_client"
+
+ def __init__(self, config: LeKiwiClientConfig):
+ import zmq
+
+ self._zmq = zmq
+ super().__init__(config)
+ self.config = config
+ self.id = config.id
+ self.robot_type = config.type
+
+ self.remote_ip = config.remote_ip
+ self.port_zmq_cmd = config.port_zmq_cmd
+ self.port_zmq_observations = config.port_zmq_observations
+
+ self.teleop_keys = config.teleop_keys
+
+ self.polling_timeout_ms = config.polling_timeout_ms
+ self.connect_timeout_s = config.connect_timeout_s
+
+ self.zmq_context = None
+ self.zmq_cmd_socket = None
+ self.zmq_observation_socket = None
+
+ self.last_frames = {}
+
+ self.last_remote_state = {}
+
+ # Define three speed levels and a current index
+ self.speed_levels = [
+ {"xy": 0.1, "theta": 30}, # slow
+ {"xy": 0.2, "theta": 60}, # medium
+ {"xy": 0.3, "theta": 90}, # fast
+ ]
+ self.speed_index = 0 # Start at slow
+
+ self._is_connected = False
+ self.logs = {}
+
+ @cached_property
+ def _state_ft(self) -> dict[str, type]:
+ return dict.fromkeys(
+ (
+ "arm_shoulder_pan.pos",
+ "arm_shoulder_lift.pos",
+ "arm_elbow_flex.pos",
+ "arm_wrist_flex.pos",
+ "arm_wrist_roll.pos",
+ "arm_gripper.pos",
+ "x.vel",
+ "y.vel",
+ "theta.vel",
+ ),
+ float,
+ )
+
+ @cached_property
+ def _state_order(self) -> tuple[str, ...]:
+ return tuple(self._state_ft.keys())
+
+ @cached_property
+ def _cameras_ft(self) -> dict[str, tuple[int, int, int]]:
+ return {name: (cfg.height, cfg.width, 3) for name, cfg in self.config.cameras.items()}
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._state_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._state_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return self._is_connected
+
+ @property
+ def is_calibrated(self) -> bool:
+ pass
+
+ def connect(self) -> None:
+ """Establishes ZMQ sockets with the remote mobile robot"""
+
+ if self._is_connected:
+ raise DeviceAlreadyConnectedError(
+ "LeKiwi Daemon is already connected. Do not run `robot.connect()` twice."
+ )
+
+ zmq = self._zmq
+ self.zmq_context = zmq.Context()
+ self.zmq_cmd_socket = self.zmq_context.socket(zmq.PUSH)
+ zmq_cmd_locator = f"tcp://{self.remote_ip}:{self.port_zmq_cmd}"
+ self.zmq_cmd_socket.connect(zmq_cmd_locator)
+ self.zmq_cmd_socket.setsockopt(zmq.CONFLATE, 1)
+
+ self.zmq_observation_socket = self.zmq_context.socket(zmq.PULL)
+ zmq_observations_locator = f"tcp://{self.remote_ip}:{self.port_zmq_observations}"
+ self.zmq_observation_socket.connect(zmq_observations_locator)
+ self.zmq_observation_socket.setsockopt(zmq.CONFLATE, 1)
+
+ poller = zmq.Poller()
+ poller.register(self.zmq_observation_socket, zmq.POLLIN)
+ socks = dict(poller.poll(self.connect_timeout_s * 1000))
+ if self.zmq_observation_socket not in socks or socks[self.zmq_observation_socket] != zmq.POLLIN:
+ raise DeviceNotConnectedError("Timeout waiting for LeKiwi Host to connect expired.")
+
+ self._is_connected = True
+
+ def calibrate(self) -> None:
+ pass
+
+ def _poll_and_get_latest_message(self) -> str | None:
+ """Polls the ZMQ socket for a limited time and returns the latest message string."""
+ zmq = self._zmq
+ poller = zmq.Poller()
+ poller.register(self.zmq_observation_socket, zmq.POLLIN)
+
+ try:
+ socks = dict(poller.poll(self.polling_timeout_ms))
+ except zmq.ZMQError as e:
+ logging.error(f"ZMQ polling error: {e}")
+ return None
+
+ if self.zmq_observation_socket not in socks:
+ logging.info("No new data available within timeout.")
+ return None
+
+ last_msg = None
+ while True:
+ try:
+ msg = self.zmq_observation_socket.recv_string(zmq.NOBLOCK)
+ last_msg = msg
+ except zmq.Again:
+ break
+
+ if last_msg is None:
+ logging.warning("Poller indicated data, but failed to retrieve message.")
+
+ return last_msg
+
+ def _parse_observation_json(self, obs_string: str) -> dict[str, Any] | None:
+ """Parses the JSON observation string."""
+ try:
+ return json.loads(obs_string)
+ except json.JSONDecodeError as e:
+ logging.error(f"Error decoding JSON observation: {e}")
+ return None
+
+ def _decode_image_from_b64(self, image_b64: str) -> np.ndarray | None:
+ """Decodes a base64 encoded image string to an OpenCV image."""
+ if not image_b64:
+ return None
+ try:
+ jpg_data = base64.b64decode(image_b64)
+ np_arr = np.frombuffer(jpg_data, dtype=np.uint8)
+ frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
+ if frame is None:
+ logging.warning("cv2.imdecode returned None for an image.")
+ return frame
+ except (TypeError, ValueError) as e:
+ logging.error(f"Error decoding base64 image data: {e}")
+ return None
+
+ def _remote_state_from_obs(
+ self, observation: dict[str, Any]
+ ) -> tuple[dict[str, np.ndarray], dict[str, Any]]:
+ """Extracts frames, and state from the parsed observation."""
+
+ flat_state = {key: observation.get(key, 0.0) for key in self._state_order}
+
+ state_vec = np.array([flat_state[key] for key in self._state_order], dtype=np.float32)
+
+ obs_dict: dict[str, Any] = {**flat_state, "observation.state": state_vec}
+
+ # Decode images
+ current_frames: dict[str, np.ndarray] = {}
+ for cam_name, image_b64 in observation.items():
+ if cam_name not in self._cameras_ft:
+ continue
+ frame = self._decode_image_from_b64(image_b64)
+ if frame is not None:
+ current_frames[cam_name] = frame
+
+ return current_frames, obs_dict
+
+    def _get_data(self) -> tuple[dict[str, np.ndarray], dict[str, Any]]:
+ """
+ Polls the video socket for the latest observation data.
+
+ Attempts to retrieve and decode the latest message within a short timeout.
+        If successful, updates and returns the new camera frames and the flat observation state.
+ If no new data arrives or decoding fails, returns the last known values.
+ """
+
+ # 1. Get the latest message string from the socket
+ latest_message_str = self._poll_and_get_latest_message()
+
+ # 2. If no message, return cached data
+ if latest_message_str is None:
+ return self.last_frames, self.last_remote_state
+
+ # 3. Parse the JSON message
+ observation = self._parse_observation_json(latest_message_str)
+
+ # 4. If JSON parsing failed, return cached data
+ if observation is None:
+ return self.last_frames, self.last_remote_state
+
+ # 5. Process the valid observation data
+ try:
+ new_frames, new_state = self._remote_state_from_obs(observation)
+ except Exception as e:
+ logging.error(f"Error processing observation data, serving last observation: {e}")
+ return self.last_frames, self.last_remote_state
+
+ self.last_frames = new_frames
+ self.last_remote_state = new_state
+
+ return new_frames, new_state
+
+ def get_observation(self) -> dict[str, Any]:
+ """
+ Capture observations from the remote robot: current follower arm positions,
+ present wheel speeds (converted to body-frame velocities: x, y, theta),
+ and a camera frame. Receives over ZMQ, translate to body-frame vel
+ """
+ if not self._is_connected:
+ raise DeviceNotConnectedError("LeKiwiClient is not connected. You need to run `robot.connect()`.")
+
+ frames, obs_dict = self._get_data()
+
+ # Loop over each configured camera
+ for cam_name, frame in frames.items():
+ if frame is None:
+ logging.warning("Frame is None")
+ frame = np.zeros((640, 480, 3), dtype=np.uint8)
+ obs_dict[cam_name] = frame
+
+ return obs_dict
+
+ def _from_keyboard_to_base_action(self, pressed_keys: np.ndarray):
+ # Speed control
+ if self.teleop_keys["speed_up"] in pressed_keys:
+ self.speed_index = min(self.speed_index + 1, 2)
+ if self.teleop_keys["speed_down"] in pressed_keys:
+ self.speed_index = max(self.speed_index - 1, 0)
+ speed_setting = self.speed_levels[self.speed_index]
+        xy_speed = speed_setting["xy"]  # e.g. 0.1, 0.2, or 0.3 m/s
+ theta_speed = speed_setting["theta"] # e.g. 30, 60, or 90
+
+ x_cmd = 0.0 # m/s forward/backward
+ y_cmd = 0.0 # m/s lateral
+ theta_cmd = 0.0 # deg/s rotation
+
+ if self.teleop_keys["forward"] in pressed_keys:
+ x_cmd += xy_speed
+ if self.teleop_keys["backward"] in pressed_keys:
+ x_cmd -= xy_speed
+ if self.teleop_keys["left"] in pressed_keys:
+ y_cmd += xy_speed
+ if self.teleop_keys["right"] in pressed_keys:
+ y_cmd -= xy_speed
+ if self.teleop_keys["rotate_left"] in pressed_keys:
+ theta_cmd += theta_speed
+ if self.teleop_keys["rotate_right"] in pressed_keys:
+ theta_cmd -= theta_speed
+ return {
+ "x.vel": x_cmd,
+ "y.vel": y_cmd,
+ "theta.vel": theta_cmd,
+ }
+
+ def configure(self):
+ pass
+
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ """Command lekiwi to move to a target joint configuration. Translates to motor space + sends over ZMQ
+
+        Args:
+            action (dict[str, Any]): dictionary of goal positions for the arm joints and body-frame velocities for the base.
+
+        Raises:
+            DeviceNotConnectedError: if robot is not connected.
+
+        Returns:
+            dict[str, Any]: the action sent to the remote robot, keyed per state feature, plus a flattened "action" array.
+ """
+ if not self._is_connected:
+ raise DeviceNotConnectedError(
+                "LeKiwiClient is not connected. You need to run `robot.connect()`."
+ )
+
+ self.zmq_cmd_socket.send_string(json.dumps(action)) # action is in motor space
+
+ # TODO(Steven): Remove the np conversion when it is possible to record a non-numpy array value
+ actions = np.array([action.get(k, 0.0) for k in self._state_order], dtype=np.float32)
+
+ action_sent = {key: actions[i] for i, key in enumerate(self._state_order)}
+ action_sent["action"] = actions
+ return action_sent
+
+ def disconnect(self):
+ """Cleans ZMQ comms"""
+
+ if not self._is_connected:
+ raise DeviceNotConnectedError(
+ "LeKiwi is not connected. You need to run `robot.connect()` before disconnecting."
+ )
+ self.zmq_observation_socket.close()
+ self.zmq_cmd_socket.close()
+ self.zmq_context.term()
+ self._is_connected = False
diff --git a/src/lerobot/robots/lekiwi/lekiwi_host.py b/src/lerobot/robots/lekiwi/lekiwi_host.py
new file mode 100644
index 0000000000..1155cf71c2
--- /dev/null
+++ b/src/lerobot/robots/lekiwi/lekiwi_host.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import json
+import logging
+import time
+
+import cv2
+import zmq
+
+from .config_lekiwi import LeKiwiConfig, LeKiwiHostConfig
+from .lekiwi import LeKiwi
+
+
+class LeKiwiHost:
+ def __init__(self, config: LeKiwiHostConfig):
+ self.zmq_context = zmq.Context()
+ self.zmq_cmd_socket = self.zmq_context.socket(zmq.PULL)
+ self.zmq_cmd_socket.setsockopt(zmq.CONFLATE, 1)
+ self.zmq_cmd_socket.bind(f"tcp://*:{config.port_zmq_cmd}")
+
+ self.zmq_observation_socket = self.zmq_context.socket(zmq.PUSH)
+ self.zmq_observation_socket.setsockopt(zmq.CONFLATE, 1)
+ self.zmq_observation_socket.bind(f"tcp://*:{config.port_zmq_observations}")
+
+ self.connection_time_s = config.connection_time_s
+ self.watchdog_timeout_ms = config.watchdog_timeout_ms
+ self.max_loop_freq_hz = config.max_loop_freq_hz
+
+ def disconnect(self):
+ self.zmq_observation_socket.close()
+ self.zmq_cmd_socket.close()
+ self.zmq_context.term()
+
+
+def main():
+ logging.info("Configuring LeKiwi")
+ robot_config = LeKiwiConfig()
+ robot = LeKiwi(robot_config)
+
+ logging.info("Connecting LeKiwi")
+ robot.connect()
+
+ logging.info("Starting HostAgent")
+ host_config = LeKiwiHostConfig()
+ host = LeKiwiHost(host_config)
+
+ last_cmd_time = time.time()
+ watchdog_active = False
+ logging.info("Waiting for commands...")
+ try:
+ # Business logic
+ start = time.perf_counter()
+ duration = 0
+ while duration < host.connection_time_s:
+ loop_start_time = time.time()
+ try:
+ msg = host.zmq_cmd_socket.recv_string(zmq.NOBLOCK)
+ data = dict(json.loads(msg))
+ _action_sent = robot.send_action(data)
+ last_cmd_time = time.time()
+ watchdog_active = False
+ except zmq.Again:
+ if not watchdog_active:
+ logging.warning("No command available")
+ except Exception as e:
+ logging.error("Message fetching failed: %s", e)
+
+ now = time.time()
+ if (now - last_cmd_time > host.watchdog_timeout_ms / 1000) and not watchdog_active:
+ logging.warning(
+ f"Command not received for more than {host.watchdog_timeout_ms} milliseconds. Stopping the base."
+ )
+ watchdog_active = True
+ robot.stop_base()
+
+ last_observation = robot.get_observation()
+
+ # Encode ndarrays to base64 strings
+ for cam_key, _ in robot.cameras.items():
+ ret, buffer = cv2.imencode(
+ ".jpg", last_observation[cam_key], [int(cv2.IMWRITE_JPEG_QUALITY), 90]
+ )
+ if ret:
+ last_observation[cam_key] = base64.b64encode(buffer).decode("utf-8")
+ else:
+ last_observation[cam_key] = ""
+
+ # Send the observation to the remote agent
+ try:
+ host.zmq_observation_socket.send_string(json.dumps(last_observation), flags=zmq.NOBLOCK)
+ except zmq.Again:
+ logging.info("Dropping observation, no client connected")
+
+ # Ensure a short sleep to avoid overloading the CPU.
+ elapsed = time.time() - loop_start_time
+
+ time.sleep(max(1 / host.max_loop_freq_hz - elapsed, 0))
+ duration = time.perf_counter() - start
+ print("Cycle time reached.")
+
+ except KeyboardInterrupt:
+ print("Keyboard interrupt received. Exiting...")
+ finally:
+ print("Shutting down Lekiwi Host.")
+ robot.disconnect()
+ host.disconnect()
+
+ logging.info("Finished LeKiwi cleanly")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/lerobot/robots/robot.py b/src/lerobot/robots/robot.py
new file mode 100644
index 0000000000..2a90043809
--- /dev/null
+++ b/src/lerobot/robots/robot.py
@@ -0,0 +1,185 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import builtins
+from pathlib import Path
+from typing import Any
+
+import draccus
+
+from lerobot.constants import HF_LEROBOT_CALIBRATION, ROBOTS
+from lerobot.motors import MotorCalibration
+
+from .config import RobotConfig
+
+
+# TODO(aliberts): action/obs typing such as Generic[ObsType, ActType] similar to gym.Env ?
+# https://github.com/Farama-Foundation/Gymnasium/blob/3287c869f9a48d99454306b0d4b4ec537f0f35e3/gymnasium/core.py#L23
+class Robot(abc.ABC):
+ """
+ The base abstract class for all LeRobot-compatible robots.
+
+ This class provides a standardized interface for interacting with physical robots.
+ Subclasses must implement all abstract methods and properties to be usable.
+
+ Attributes:
+ config_class (RobotConfig): The expected configuration class for this robot.
+ name (str): The unique robot name used to identify this robot type.
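+
+    Typical usage (sketch, with a hypothetical concrete subclass `MyRobot`):
+        robot = MyRobot(config)
+        robot.connect()
+        obs = robot.get_observation()   # keys follow `observation_features`
+        robot.send_action({...})        # keys follow `action_features`
+        robot.disconnect()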
+ """
+
+ # Set these in ALL subclasses
+ config_class: builtins.type[RobotConfig]
+ name: str
+
+ def __init__(self, config: RobotConfig):
+ self.robot_type = self.name
+ self.id = config.id
+ self.calibration_dir = (
+ config.calibration_dir if config.calibration_dir else HF_LEROBOT_CALIBRATION / ROBOTS / self.name
+ )
+ self.calibration_dir.mkdir(parents=True, exist_ok=True)
+ self.calibration_fpath = self.calibration_dir / f"{self.id}.json"
+ self.calibration: dict[str, MotorCalibration] = {}
+ if self.calibration_fpath.is_file():
+ self._load_calibration()
+
+ def __str__(self) -> str:
+ return f"{self.id} {self.__class__.__name__}"
+
+ # TODO(aliberts): create a proper Feature class for this that links with datasets
+ @property
+ @abc.abstractmethod
+ def observation_features(self) -> dict:
+ """
+ A dictionary describing the structure and types of the observations produced by the robot.
+ Its structure (keys) should match the structure of what is returned by :pymeth:`get_observation`.
+ Values for the dict should either be:
+ - The type of the value if it's a simple value, e.g. `float` for single proprioceptive value (a joint's position/velocity)
+ - A tuple representing the shape if it's an array-type value, e.g. `(height, width, channel)` for images
+
+ Note: this property should be able to be called regardless of whether the robot is connected or not.
+ """
+ pass
+
+ @property
+ @abc.abstractmethod
+ def action_features(self) -> dict:
+ """
+ A dictionary describing the structure and types of the actions expected by the robot. Its structure
+ (keys) should match the structure of what is passed to :pymeth:`send_action`. Values for the dict
+ should be the type of the value if it's a simple value, e.g. `float` for single proprioceptive value
+ (a joint's goal position/velocity)
+
+ Note: this property should be able to be called regardless of whether the robot is connected or not.
+ """
+ pass
+
+ @property
+ @abc.abstractmethod
+ def is_connected(self) -> bool:
+ """
+ Whether the robot is currently connected or not. If `False`, calling :pymeth:`get_observation` or
+ :pymeth:`send_action` should raise an error.
+ """
+ pass
+
+ @abc.abstractmethod
+ def connect(self, calibrate: bool = True) -> None:
+ """
+ Establish communication with the robot.
+
+ Args:
+ calibrate (bool): If True, automatically calibrate the robot after connecting if it's not
+ calibrated or needs calibration (this is hardware-dependant).
+ """
+ pass
+
+ @property
+ @abc.abstractmethod
+ def is_calibrated(self) -> bool:
+ """Whether the robot is currently calibrated or not. Should be always `True` if not applicable"""
+ pass
+
+ @abc.abstractmethod
+ def calibrate(self) -> None:
+ """
+ Calibrate the robot if applicable. If not, this should be a no-op.
+
+ This method should collect any necessary data (e.g., motor offsets) and update the
+ :pyattr:`calibration` dictionary accordingly.
+ """
+ pass
+
+ def _load_calibration(self, fpath: Path | None = None) -> None:
+ """
+ Helper to load calibration data from the specified file.
+
+ Args:
+ fpath (Path | None): Optional path to the calibration file. Defaults to `self.calibration_fpath`.
+ """
+ fpath = self.calibration_fpath if fpath is None else fpath
+ with open(fpath) as f, draccus.config_type("json"):
+ self.calibration = draccus.load(dict[str, MotorCalibration], f)
+
+ def _save_calibration(self, fpath: Path | None = None) -> None:
+ """
+ Helper to save calibration data to the specified file.
+
+ Args:
+ fpath (Path | None): Optional path to save the calibration file. Defaults to `self.calibration_fpath`.
+ """
+ fpath = self.calibration_fpath if fpath is None else fpath
+ with open(fpath, "w") as f, draccus.config_type("json"):
+ draccus.dump(self.calibration, f, indent=4)
+
+ @abc.abstractmethod
+ def configure(self) -> None:
+ """
+ Apply any one-time or runtime configuration to the robot.
+ This may include setting motor parameters, control modes, or initial state.
+ """
+ pass
+
+ @abc.abstractmethod
+ def get_observation(self) -> dict[str, Any]:
+ """
+ Retrieve the current observation from the robot.
+
+ Returns:
+ dict[str, Any]: A flat dictionary representing the robot's current sensory state. Its structure
+ should match :pymeth:`observation_features`.
+ """
+
+ pass
+
+ @abc.abstractmethod
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ """
+ Send an action command to the robot.
+
+ Args:
+ action (dict[str, Any]): Dictionary representing the desired action. Its structure should match
+ :pymeth:`action_features`.
+
+ Returns:
+ dict[str, Any]: The action actually sent to the motors potentially clipped or modified, e.g. by
+ safety limits on velocity.
+ """
+ pass
+
+ @abc.abstractmethod
+ def disconnect(self) -> None:
+ """Disconnect from the robot and perform any necessary cleanup."""
+ pass
diff --git a/src/lerobot/robots/so100_follower/__init__.py b/src/lerobot/robots/so100_follower/__init__.py
new file mode 100644
index 0000000000..b995aab137
--- /dev/null
+++ b/src/lerobot/robots/so100_follower/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_so100_follower import SO100FollowerConfig, SO100FollowerEndEffectorConfig
+from .so100_follower import SO100Follower
+from .so100_follower_end_effector import SO100FollowerEndEffector
diff --git a/src/lerobot/robots/so100_follower/config_so100_follower.py b/src/lerobot/robots/so100_follower/config_so100_follower.py
new file mode 100644
index 0000000000..ea8b9f1c27
--- /dev/null
+++ b/src/lerobot/robots/so100_follower/config_so100_follower.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+
+from ..config import RobotConfig
+
+
+@RobotConfig.register_subclass("so100_follower")
+@dataclass
+class SO100FollowerConfig(RobotConfig):
+ # Port to connect to the arm
+ port: str
+
+ disable_torque_on_disconnect: bool = True
+
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+ # the number of motors in your follower arms.
+ max_relative_target: int | None = None
+
+ # cameras
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
+
+ # Set to `True` for backward compatibility with previous policies/dataset
+ use_degrees: bool = False
+
+
+@RobotConfig.register_subclass("so100_follower_end_effector")
+@dataclass
+class SO100FollowerEndEffectorConfig(SO100FollowerConfig):
+ """Configuration for the SO100FollowerEndEffector robot."""
+
+ # Path to URDF file for kinematics
+ # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo:
+ # https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
+ urdf_path: str | None = None
+
+ # End-effector frame name in the URDF
+ target_frame_name: str = "gripper_frame_link"
+
+ # Default bounds for the end-effector position (in meters)
+ end_effector_bounds: dict[str, list[float]] = field(
+ default_factory=lambda: {
+ "min": [-1.0, -1.0, -1.0], # min x, y, z
+ "max": [1.0, 1.0, 1.0], # max x, y, z
+ }
+ )
+
+ max_gripper_pos: float = 50
+
+ end_effector_step_sizes: dict[str, float] = field(
+ default_factory=lambda: {
+ "x": 0.02,
+ "y": 0.02,
+ "z": 0.02,
+ }
+ )
diff --git a/src/lerobot/robots/so100_follower/so100.mdx b/src/lerobot/robots/so100_follower/so100.mdx
new file mode 100644
index 0000000000..d9ff922c55
--- /dev/null
+++ b/src/lerobot/robots/so100_follower/so100.mdx
@@ -0,0 +1,640 @@
+# SO-100
+
+In the steps below, we explain how to assemble the SO-100 robot.
+
+## Source the parts
+
+Follow this [README](https://github.com/TheRobotStudio/SO-ARM100/blob/main/SO100.md). It contains the bill of materials, with links to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or if you don't own a 3D printer.
+
+## Install LeRobot 🤗
+
+To install LeRobot, follow our [Installation Guide](./installation)
+
+In addition to these instructions, you need to install the Feetech SDK:
+
+```bash
+pip install -e ".[feetech]"
+```
+
+## Configure the motors
+
+**Note:**
+Unlike the SO-101, the motor connectors are not easily accessible once the arm is assembled, so the configuration step must be done beforehand.
+
+### 1. Find the USB ports associated with each arm
+
+To find the port for each bus servo adapter, run this script:
+
+```bash
+python -m lerobot.find_port
+```
+
+
+
+
+Example output:
+
+```
+Finding all available ports for the MotorBus.
+['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
+Remove the USB cable from your MotorsBus and press Enter when done.
+
+[...Disconnect corresponding leader or follower arm and press Enter...]
+
+The port of this MotorsBus is /dev/tty.usbmodem575E0032081
+Reconnect the USB cable.
+```
+
+Here the detected port is `/dev/tty.usbmodem575E0032081`, corresponding to your leader or follower arm.
+
+
+
+
+On Linux, you might need to give access to the USB ports by running:
+
+```bash
+sudo chmod 666 /dev/ttyACM0
+sudo chmod 666 /dev/ttyACM1
+```
+
+Example output:
+
+```
+Finding all available ports for the MotorBus.
+['/dev/ttyACM0', '/dev/ttyACM1']
+Remove the usb cable from your MotorsBus and press Enter when done.
+
+[...Disconnect corresponding leader or follower arm and press Enter...]
+
+The port of this MotorsBus is /dev/ttyACM1
+Reconnect the USB cable.
+```
+
+Here the detected port is `/dev/ttyACM1`, corresponding to your leader or follower arm.
+
+
+
+
+### 2. Set the motors ids and baudrates
+
+Each motor is identified by a unique id on the bus. When brand new, motors usually come with a default id of `1`. For the communication to work properly between the motors and the controller, we first need to set a unique, different id to each motor. Additionally, the speed at which data is transmitted on the bus is determined by the baudrate. In order to talk to each other, the controller and all the motors need to be configured with the same baudrate.
+
+To that end, we first need to connect to each motor individually with the controller in order to set these. Since we will write these parameters in the non-volatile section of the motors' internal memory (EEPROM), we'll only need to do this once.
+
+If you are repurposing motors from another robot, you will probably also need to perform this step as the ids and baudrate likely won't match.
+
+#### Follower
+
+Connect the USB cable from your computer and the power supply to the follower arm's controller board. Then, run the following command or the API example with the port you got from the previous step. You'll also need to give your follower arm a name with the `id` parameter.
+
+For a visual reference on how to set the motor ids please refer to [this video](https://huggingface.co/docs/lerobot/en/so101#setup-motors-video) where we follow the process for the SO101 arm.
+
+
+
+
+```bash
+python -m lerobot.setup_motors \
+ --robot.type=so100_follower \
+ --robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step
+```
+
+
+
+
+
+```python
+from lerobot.robots.so100_follower import SO100Follower, SO100FollowerConfig
+
+config = SO100FollowerConfig(
+ port="/dev/tty.usbmodem585A0076841",
+ id="my_awesome_follower_arm",
+)
+follower = SO100Follower(config)
+follower.setup_motors()
+```
+
+
+
+
+
+You should see the following instruction
+
+```
+Connect the controller board to the 'gripper' motor only and press enter.
+```
+
+As instructed, plug the gripper's motor. Make sure it's the only motor connected to the board, and that the motor itself is not yet daisy-chained to any other motor. As you press `[Enter]`, the script will automatically set the id and baudrate for that motor.
+
+
+Troubleshooting
+
+If you get an error at that point, check your cables and make sure they are plugged in properly:
+
+
+
+- Power supply
+- USB cable between your computer and the controller board
+- The 3-pin cable from the controller board to the motor
+
+
+If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
+
+
+
+You should then see the following message:
+
+```
+'gripper' motor id set to 6
+```
+
+Followed by the next instruction:
+
+```
+Connect the controller board to the 'wrist_roll' motor only and press enter.
+```
+
+You can disconnect the 3-pin cable from the controller board, but you can leave it connected to the gripper motor on the other end, as it will already be in the right place. Now, plug in another 3-pin cable to the wrist roll motor and connect it to the controller board. As with the previous motor, make sure it is the only motor connected to the board and that the motor itself isn't connected to any other one.
+
+Repeat the operation for each motor as instructed.
+
+> [!TIP]
+> Check your cabling at each step before pressing Enter. For instance, the power supply cable might disconnect as you manipulate the board.
+
+When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm.
+
+#### Leader
+
+Do the same steps for the leader arm.
+
+
+
+```bash
+python -m lerobot.setup_motors \
+ --teleop.type=so100_leader \
+ --teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
+```
+
+
+
+
+```python
+from lerobot.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig
+
+config = SO100LeaderConfig(
+ port="/dev/tty.usbmodem585A0076841",
+ id="my_awesome_leader_arm",
+)
+leader = SO100Leader(config)
+leader.setup_motors()
+```
+
+
+
+
+
+## Step-by-Step Assembly Instructions
+
+## Remove the gears of the 6 leader motors
+
+
+Video removing gears
+
+
+
+
+
+
+
+Follow the video for removing the gears. You need to remove the gears from the motors of the leader arm. As a result, you will only use the position encoder of the motors and reduce friction, making the leader arm easier to operate.
+
+### Clean Parts
+
+Remove all support material from the 3D-printed parts. The easiest way to do this is using a small screwdriver to get underneath the support material.
+
+### Additional Guidance
+
+
+Video assembling arms
+
+
+
+
+
+
+
+**Note:**
+This video provides visual guidance for assembling the arms, but it doesn't specify when or how to do the wiring. Inserting the cables beforehand is much easier than doing it afterward. The first arm may take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.
+
+---
+
+### First Motor
+
+**Step 2: Insert Wires**
+
+- Insert two wires into the first motor.
+
+
+
+**Step 3: Install in Base**
+
+- Place the first motor into the base.
+
+
+
+**Step 4: Secure Motor**
+
+- Fasten the motor with 4 screws: two from the bottom and two from the top.
+
+**Step 5: Attach Motor Holder**
+
+- Slide over the first motor holder and fasten it using two screws (one on each side).
+
+
+
+**Step 6: Attach Motor Horns**
+
+- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears.
+
+
+
+
+
+ Video adding motor horn
+
+
+
+
+**Step 7: Attach Shoulder Part**
+
+- Route one wire to the back of the robot and the other to the left or towards you (see photo).
+- Attach the shoulder part.
+
+
+
+**Step 8: Secure Shoulder**
+
+- Tighten the shoulder part with 4 screws on top and 4 on the bottom
+ _(access bottom holes by turning the shoulder)._
+
+---
+
+### Second Motor Assembly
+
+**Step 9: Install Motor 2**
+
+- Slide the second motor in from the top and link the wire from motor 1 to motor 2.
+
+
+
+**Step 10: Attach Shoulder Holder**
+
+- Add the shoulder motor holder.
+- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo).
+- This part can be tight to assemble; you can use a workbench as shown in the image, or a similar setup, to push the part around the motor.
+
+
+
+
+
+
+
+**Step 11: Secure Motor 2**
+
+- Fasten the second motor with 4 screws.
+
+**Step 12: Attach Motor Horn**
+
+- Attach both motor horns to motor 2, again using the horn screw.
+
+**Step 13: Attach Base**
+
+- Install the base attachment using 2 screws.
+
+
+
+**Step 14: Attach Upper Arm**
+
+- Attach the upper arm with 4 screws on each side.
+
+
+
+---
+
+### Third Motor Assembly
+
+**Step 15: Install Motor 3**
+
+- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws.
+
+**Step 16: Attach Motor Horn**
+
+- Attach both motor horns to motor 3 and secure one again with a horn screw.
+
+
+
+**Step 17: Attach Forearm**
+
+- Connect the forearm to motor 3 using 4 screws on each side.
+
+
+
+---
+
+### Fourth Motor Assembly
+
+**Step 18: Install Motor 4**
+
+- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw.
+
+
+
+
+
+
+**Step 19: Attach Motor Holder 4**
+
+- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo).
+
+
+
+**Step 20: Secure Motor 4 & Attach Horn**
+
+- Fasten motor 4 with 4 screws and attach its motor horns, securing one with a horn screw.
+
+
+
+---
+
+### Wrist Assembly
+
+**Step 21: Install Motor 5**
+
+- Insert motor 5 into the wrist holder and secure it with 2 front screws.
+
+
+
+**Step 22: Attach Wrist**
+
+- Connect the wire from motor 4 to motor 5, and already insert the other wire for the gripper at this stage.
+- Secure the wrist to motor 4 using 4 screws on both sides.
+
+
+
+**Step 23: Attach Wrist Horn**
+
+- Install only one motor horn on the wrist motor and secure it with a horn screw.
+
+
+
+---
+
+### Follower Configuration
+
+**Step 24: Attach Gripper**
+
+- Attach the gripper to motor 5.
+
+
+
+**Step 25: Install Gripper Motor**
+
+- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side.
+
+
+
+**Step 26: Attach Gripper Horn & Claw**
+
+- Attach the motor horns and again use a horn screw.
+- Install the gripper claw and secure it with 4 screws on both sides.
+
+
+
+**Step 27: Mount Controller**
+
+- Attach the motor controller to the back of the robot.
+
+
+
+
+
+
+_Assembly complete – proceed to Leader arm assembly._
+
+---
+
+### Leader Configuration
+
+For the leader configuration, perform **Steps 1–23**. Make sure that you removed the motor gears from the motors.
+
+**Step 24: Attach Leader Holder**
+
+- Mount the leader holder onto the wrist and secure it with a screw.
+
+
+
+**Step 25: Attach Handle**
+
+- Attach the handle to motor 5 using 4 screws.
+
+
+
+**Step 26: Install Gripper Motor**
+
+- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire.
+
+
+
+**Step 27: Attach Trigger**
+
+- Attach the follower trigger with 4 screws.
+
+
+
+**Step 28: Mount Controller**
+
+- Attach the motor controller to the back of the robot.
+
+
+
+
+
+
+## Calibrate
+
+Next, you'll need to calibrate your robot to ensure that the leader and follower arms have the same position values when they are in the same physical position.
+The calibration process is very important because it allows a neural network trained on one robot to work on another.
+
+#### Follower
+
+Run the following command or API example to calibrate the follower arm:
+
+
+
+
+```bash
+python -m lerobot.calibrate \
+ --robot.type=so100_follower \
+    --robot.port=/dev/tty.usbmodem58760431551 \
+    --robot.id=my_awesome_follower_arm # <- The port of your robot and a unique name for it
+```
+
+
+
+
+
+```python
+from lerobot.robots.so100_follower import SO100FollowerConfig, SO100Follower
+
+config = SO100FollowerConfig(
+ port="/dev/tty.usbmodem585A0076891",
+ id="my_awesome_follower_arm",
+)
+
+follower = SO100Follower(config)
+follower.connect(calibrate=False)
+follower.calibrate()
+follower.disconnect()
+```
+
+
+
+
+
+We unified the calibration method for most robots, so the calibration steps for this SO100 arm are the same as for the Koch and SO101. First, move the robot to the position where each joint is in the middle of its range, then press `Enter`. Then, move all joints through their full range of motion. A reference video of this same process for the SO101 can be found [here](https://huggingface.co/docs/lerobot/en/so101#calibration-video).
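+
+For reference, this is roughly what the follower's `calibrate()` method does under the hood (a condensed sketch of the implementation; the full version also writes the result to the motors and saves it to a calibration file):
+
+```python
+# Condensed sketch of SO100Follower.calibrate():
+self.bus.disable_torque()
+input(f"Move {self} to the middle of its range of motion and press ENTER....")
+homing_offsets = self.bus.set_half_turn_homings()  # the current pose becomes the mid-range reference
+
+# wrist_roll can turn a full revolution, so its range is fixed to 0..4095
+# and only the other joints are swept through their ranges of motion.
+full_turn_motor = "wrist_roll"
+unknown_range_motors = [m for m in self.bus.motors if m != full_turn_motor]
+range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+range_mins[full_turn_motor] = 0
+range_maxes[full_turn_motor] = 4095
+```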
+
+#### Leader
+
+Do the same steps to calibrate the leader arm, run the following command or API example:
+
+
+
+
+```bash
+python -m lerobot.calibrate \
+ --teleop.type=so100_leader \
+    --teleop.port=/dev/tty.usbmodem58760431551 \
+    --teleop.id=my_awesome_leader_arm # <- The port of your leader arm and a unique name for it
+```
+
+
+
+
+
+```python
+from lerobot.teleoperators.so100_leader import SO100LeaderConfig, SO100Leader
+
+config = SO100LeaderConfig(
+ port="/dev/tty.usbmodem58760431551",
+ id="my_awesome_leader_arm",
+)
+
+leader = SO100Leader(config)
+leader.connect(calibrate=False)
+leader.calibrate()
+leader.disconnect()
+```
+
+
+
+
+
+Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)
+
+> [!TIP]
+> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
diff --git a/src/lerobot/robots/so100_follower/so100_follower.py b/src/lerobot/robots/so100_follower/so100_follower.py
new file mode 100644
index 0000000000..e5da6bc1a8
--- /dev/null
+++ b/src/lerobot/robots/so100_follower/so100_follower.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from typing import Any
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.feetech import (
+ FeetechMotorsBus,
+ OperatingMode,
+)
+
+from ..robot import Robot
+from ..utils import ensure_safe_goal_position
+from .config_so100_follower import SO100FollowerConfig
+
+logger = logging.getLogger(__name__)
+
+
+class SO100Follower(Robot):
+ """
+ [SO-100 Follower Arm](https://github.com/TheRobotStudio/SO-ARM100) designed by TheRobotStudio
+ """
+
+ config_class = SO100FollowerConfig
+ name = "so100_follower"
+
+ def __init__(self, config: SO100FollowerConfig):
+ super().__init__(config)
+ self.config = config
+ norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ "shoulder_pan": Motor(1, "sts3215", norm_mode_body),
+ "shoulder_lift": Motor(2, "sts3215", norm_mode_body),
+ "elbow_flex": Motor(3, "sts3215", norm_mode_body),
+ "wrist_flex": Motor(4, "sts3215", norm_mode_body),
+ "wrist_roll": Motor(5, "sts3215", norm_mode_body),
+ "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
+ },
+ calibration=self.calibration,
+ )
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ @property
+ def _motors_ft(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._motors_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._motors_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+
+ def connect(self, calibrate: bool = True) -> None:
+ """
+ We assume that at connection time, arm is in a rest position,
+ and torque can be safely disabled to run calibration.
+ """
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ for cam in self.cameras.values():
+ cam.connect()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ logger.info(f"\nRunning calibration of {self}")
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+
+ input(f"Move {self} to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ full_turn_motor = "wrist_roll"
+ unknown_range_motors = [motor for motor in self.bus.motors if motor != full_turn_motor]
+ print(
+ f"Move all joints except '{full_turn_motor}' sequentially through their "
+ "entire ranges of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+ range_mins[full_turn_motor] = 0
+ range_maxes[full_turn_motor] = 4095
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=0,
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ print("Calibration saved to", self.calibration_fpath)
+
+ def configure(self) -> None:
+ with self.bus.torque_disabled():
+ self.bus.configure_motors()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+ # Set P_Coefficient to lower value to avoid shakiness (Default is 32)
+ self.bus.write("P_Coefficient", motor, 16)
+ # Set I_Coefficient and D_Coefficient to default value 0 and 32
+ self.bus.write("I_Coefficient", motor, 0)
+ self.bus.write("D_Coefficient", motor, 32)
+
+ def setup_motors(self) -> None:
+ for motor in reversed(self.bus.motors):
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ def get_observation(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ # Read arm position
+ start = time.perf_counter()
+ obs_dict = self.bus.sync_read("Present_Position")
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ """Command arm to move to a target joint configuration.
+
+ The relative action magnitude may be clipped depending on the configuration parameter
+ `max_relative_target`. In this case, the action sent differs from original action.
+ Thus, this function always returns the action actually sent.
+
+ Raises:
+ RobotDeviceNotConnectedError: if robot is not connected.
+
+ Returns:
+ the action sent to the motors, potentially clipped.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
+
+ # Cap goal position when too far away from present position.
+ # /!\ Slower fps expected due to reading from the follower.
+ if self.config.max_relative_target is not None:
+ present_pos = self.bus.sync_read("Present_Position")
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
+ goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
+
+ # Send goal position to the arm
+ self.bus.sync_write("Goal_Position", goal_pos)
+ return {f"{motor}.pos": val for motor, val in goal_pos.items()}
+
+ def disconnect(self):
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
+ for cam in self.cameras.values():
+ cam.disconnect()
+
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/robots/so100_follower/so100_follower_end_effector.py b/src/lerobot/robots/so100_follower/so100_follower_end_effector.py
new file mode 100644
index 0000000000..5fe2993cb3
--- /dev/null
+++ b/src/lerobot/robots/so100_follower/so100_follower_end_effector.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from typing import Any
+
+import numpy as np
+
+from lerobot.cameras import make_cameras_from_configs
+from lerobot.errors import DeviceNotConnectedError
+from lerobot.model.kinematics import RobotKinematics
+from lerobot.motors import Motor, MotorNormMode
+from lerobot.motors.feetech import FeetechMotorsBus
+
+from . import SO100Follower
+from .config_so100_follower import SO100FollowerEndEffectorConfig
+
+logger = logging.getLogger(__name__)
+
+
+class SO100FollowerEndEffector(SO100Follower):
+ """
+ SO100Follower robot with end-effector space control.
+
+ This robot inherits from SO100Follower but transforms actions from
+ end-effector space to joint space before sending them to the motors.
+ """
+
+ config_class = SO100FollowerEndEffectorConfig
+ name = "so100_follower_end_effector"
+
+ def __init__(self, config: SO100FollowerEndEffectorConfig):
+ super().__init__(config)
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ "shoulder_pan": Motor(1, "sts3215", MotorNormMode.DEGREES),
+ "shoulder_lift": Motor(2, "sts3215", MotorNormMode.DEGREES),
+ "elbow_flex": Motor(3, "sts3215", MotorNormMode.DEGREES),
+ "wrist_flex": Motor(4, "sts3215", MotorNormMode.DEGREES),
+ "wrist_roll": Motor(5, "sts3215", MotorNormMode.DEGREES),
+ "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
+ },
+ calibration=self.calibration,
+ )
+
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ self.config = config
+
+ # Initialize the kinematics module for the so100 robot
+ if self.config.urdf_path is None:
+ raise ValueError(
+ "urdf_path must be provided in the configuration for end-effector control. "
+ "Please set urdf_path in your SO100FollowerEndEffectorConfig."
+ )
+
+ self.kinematics = RobotKinematics(
+ urdf_path=self.config.urdf_path,
+ target_frame_name=self.config.target_frame_name,
+ )
+
+ # Store the bounds for end-effector position
+ self.end_effector_bounds = self.config.end_effector_bounds
+
+ self.current_ee_pos = None
+ self.current_joint_pos = None
+
+ @property
+ def action_features(self) -> dict[str, Any]:
+ """
+ Define action features for end-effector control.
+ Returns dictionary with dtype, shape, and names.
+ """
+ return {
+ "dtype": "float32",
+ "shape": (4,),
+ "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3},
+ }
+
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ """
+ Transform action from end-effector space to joint space and send to motors.
+
+ Args:
+ action: Dictionary with keys 'delta_x', 'delta_y', 'delta_z' for end-effector control
+ or a numpy array with [delta_x, delta_y, delta_z]
+
+ Returns:
+ The joint-space action that was sent to the motors
+ """
+
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ # Convert action to numpy array if not already
+ if isinstance(action, dict):
+ if all(k in action for k in ["delta_x", "delta_y", "delta_z"]):
+ delta_ee = np.array(
+ [
+ action["delta_x"] * self.config.end_effector_step_sizes["x"],
+ action["delta_y"] * self.config.end_effector_step_sizes["y"],
+ action["delta_z"] * self.config.end_effector_step_sizes["z"],
+ ],
+ dtype=np.float32,
+ )
+ if "gripper" not in action:
+ action["gripper"] = [1.0]
+ action = np.append(delta_ee, action["gripper"])
+ else:
+ logger.warning(
+ f"Expected action keys 'delta_x', 'delta_y', 'delta_z', got {list(action.keys())}"
+ )
+ action = np.zeros(4, dtype=np.float32)
+
+ if self.current_joint_pos is None:
+ # Read current joint positions
+ current_joint_pos = self.bus.sync_read("Present_Position")
+ self.current_joint_pos = np.array([current_joint_pos[name] for name in self.bus.motors])
+
+ # Calculate current end-effector position using forward kinematics
+ if self.current_ee_pos is None:
+ self.current_ee_pos = self.kinematics.forward_kinematics(self.current_joint_pos)
+
+ # Set desired end-effector position by adding delta
+ desired_ee_pos = np.eye(4)
+ desired_ee_pos[:3, :3] = self.current_ee_pos[:3, :3] # Keep orientation
+
+ # Add delta to position and clip to bounds
+ desired_ee_pos[:3, 3] = self.current_ee_pos[:3, 3] + action[:3]
+ if self.end_effector_bounds is not None:
+ desired_ee_pos[:3, 3] = np.clip(
+ desired_ee_pos[:3, 3],
+ self.end_effector_bounds["min"],
+ self.end_effector_bounds["max"],
+ )
+
+ # Compute inverse kinematics to get joint positions
+ target_joint_values_in_degrees = self.kinematics.inverse_kinematics(
+ self.current_joint_pos, desired_ee_pos
+ )
+
+ # Create joint space action dictionary
+ joint_action = {
+ f"{key}.pos": target_joint_values_in_degrees[i] for i, key in enumerate(self.bus.motors.keys())
+ }
+
+ # Handle gripper separately if included in action
+ # Gripper delta action is in the range 0 - 2,
+ # We need to shift the action to the range -1, 1 so that we can expand it to -Max_gripper_pos, Max_gripper_pos
+ joint_action["gripper.pos"] = np.clip(
+ self.current_joint_pos[-1] + (action[-1] - 1) * self.config.max_gripper_pos,
+ 5,
+ self.config.max_gripper_pos,
+ )
+
+ self.current_ee_pos = desired_ee_pos.copy()
+ self.current_joint_pos = target_joint_values_in_degrees.copy()
+ self.current_joint_pos[-1] = joint_action["gripper.pos"]
+
+ # Send joint space action to parent class
+ return super().send_action(joint_action)
+
+ def get_observation(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ # Read arm position
+ start = time.perf_counter()
+ obs_dict = self.bus.sync_read("Present_Position")
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def reset(self):
+ self.current_ee_pos = None
+ self.current_joint_pos = None
diff --git a/src/lerobot/robots/so101_follower/__init__.py b/src/lerobot/robots/so101_follower/__init__.py
new file mode 100644
index 0000000000..9ff2baf452
--- /dev/null
+++ b/src/lerobot/robots/so101_follower/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_so101_follower import SO101FollowerConfig
+from .so101_follower import SO101Follower
diff --git a/src/lerobot/robots/so101_follower/config_so101_follower.py b/src/lerobot/robots/so101_follower/config_so101_follower.py
new file mode 100644
index 0000000000..be630e6ac3
--- /dev/null
+++ b/src/lerobot/robots/so101_follower/config_so101_follower.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+
+from ..config import RobotConfig
+
+
+@RobotConfig.register_subclass("so101_follower")
+@dataclass
+class SO101FollowerConfig(RobotConfig):
+ # Port to connect to the arm
+ port: str
+
+ disable_torque_on_disconnect: bool = True
+
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+ # the number of motors in your follower arms.
+ max_relative_target: int | None = None
+
+ # cameras
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
+
+ # Set to `True` for backward compatibility with previous policies/dataset
+ use_degrees: bool = False
diff --git a/src/lerobot/robots/so101_follower/so101.mdx b/src/lerobot/robots/so101_follower/so101.mdx
new file mode 100644
index 0000000000..e84336e174
--- /dev/null
+++ b/src/lerobot/robots/so101_follower/so101.mdx
@@ -0,0 +1,434 @@
+# SO-101
+
+In the steps below, we explain how to assemble our flagship robot, the SO-101.
+
+## Source the parts
+
+Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts.
+It also provides advice if it's your first time printing or if you don't own a 3D printer.
+
+## Install LeRobot 🤗
+
+To install LeRobot, follow our [Installation Guide](./installation)
+
+In addition to these instructions, you need to install the Feetech SDK:
+
+```bash
+pip install -e ".[feetech]"
+```
+
+## Step-by-Step Assembly Instructions
+
+The follower arm uses 6x STS3215 motors with 1/345 gearing. The leader arm, however, uses motors with three different gear ratios so that it can both sustain its own weight and be moved without requiring much force. The table below shows which motor is used for each joint.
+
+| Leader-Arm Axis | Motor | Gear Ratio |
+| ------------------- | :---: | :--------: |
+| Base / Shoulder Pan | 1 | 1 / 191 |
+| Shoulder Lift | 2 | 1 / 345 |
+| Elbow Flex | 3 | 1 / 191 |
+| Wrist Flex | 4 | 1 / 147 |
+| Wrist Roll | 5 | 1 / 147 |
+| Gripper | 6 | 1 / 147 |
+
+### Clean Parts
+
+Remove all support material from the 3D-printed parts. The easiest way to do this is using a small screwdriver to get underneath the support material.
+
+### Joint 1
+
+- Place the first motor into the base.
+- Fasten the motor with 4 M2x6mm screws (the smallest screws): two from the top and two from the bottom.
+- Slide over the first motor holder and fasten it using two M2x6mm screws (one on each side).
+- Install both motor horns, securing the top horn with a M3x6mm screw.
+- Attach the shoulder part.
+- Tighten the shoulder part with 4 M3x6mm screws on top and 4 M3x6mm screws on the bottom.
+- Add the shoulder motor holder.
+
+
+
+
+
+### Joint 2
+
+- Slide the second motor in from the top.
+- Fasten the second motor with 4 M2x6mm screws.
+- Attach both motor horns to motor 2, again using the M3x6mm horn screw.
+- Attach the upper arm with 4 M3x6mm screws on each side.
+
+
+
+
+
+### Joint 3
+
+- Insert motor 3 and fasten using 4 M2x6mm screws
+- Attach both motor horns to motor 3 and secure one again with a M3x6mm horn screw.
+- Connect the forearm to motor 3 using 4 M3x6mm screws on each side.
+
+
+
+
+
+### Joint 4
+
+- Slide over motor holder 4.
+- Slide in motor 4.
+- Fasten motor 4 with 4 M2x6mm screws and attach its motor horns, using a M3x6mm horn screw.
+
+
+
+
+
+### Joint 5
+
+- Insert motor 5 into the wrist holder and secure it with 2 M2x6mm front screws.
+- Install only one motor horn on the wrist motor and secure it with a M3x6mm horn screw.
+- Secure the wrist to motor 4 using 4 M3x6mm screws on both sides.
+
+
+
+
+
+### Gripper / Handle
+
+
+
+
+- Attach the gripper to motor 5 by fastening it to the motor horn on the wrist using 4 M3x6mm screws.
+- Insert the gripper motor and secure it with 2 M2x6mm screws on each side.
+- Attach the motor horns and again use a M3x6mm horn screw.
+- Install the gripper claw and secure it with 4 M3x6mm screws on both sides.
+
+
+
+
+
+
+
+
+- Mount the leader holder onto the wrist and secure it with 4 M3x6mm screws.
+- Attach the handle to motor 5 using 1 M2x6mm screw.
+- Insert the gripper motor, secure it with 2 M2x6mm screws on each side, attach a motor horn using a M3x6mm horn screw.
+- Attach the follower trigger with 4 M3x6mm screws.
+
+
+
+
+
+
+
+
+## Configure the motors
+
+### 1. Find the USB ports associated with each arm
+
+To find the port for each bus servo adapter, run this script:
+
+```bash
+python -m lerobot.find_port
+```
+
+
+
+
+Example output:
+
+```
+Finding all available ports for the MotorBus.
+['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
+Remove the USB cable from your MotorsBus and press Enter when done.
+
+[...Disconnect corresponding leader or follower arm and press Enter...]
+
+The port of this MotorsBus is /dev/tty.usbmodem575E0032081
+Reconnect the USB cable.
+```
+
+Here the found port is `/dev/tty.usbmodem575E0032081`, corresponding to your leader or follower arm.
+
+
+
+
+On Linux, you might need to give access to the USB ports by running:
+
+```bash
+sudo chmod 666 /dev/ttyACM0
+sudo chmod 666 /dev/ttyACM1
+```
+
+Example output:
+
+```
+Finding all available ports for the MotorBus.
+['/dev/ttyACM0', '/dev/ttyACM1']
+Remove the usb cable from your MotorsBus and press Enter when done.
+
+[...Disconnect corresponding leader or follower arm and press Enter...]
+
+The port of this MotorsBus is /dev/ttyACM1
+Reconnect the USB cable.
+```
+
+Here the found port is `/dev/ttyACM1`, corresponding to your leader or follower arm.
+
+
+
+
+### 2. Set the motors ids and baudrates
+
+Each motor is identified by a unique id on the bus. When brand new, motors usually come with a default id of `1`. For the communication to work properly between the motors and the controller, we first need to set a unique, different id to each motor. Additionally, the speed at which data is transmitted on the bus is determined by the baudrate. In order to talk to each other, the controller and all the motors need to be configured with the same baudrate.
+
+To that end, we first need to connect to each motor individually with the controller in order to set these. Since we will write these parameters in the non-volatile section of the motors' internal memory (EEPROM), we'll only need to do this once.
+
+If you are repurposing motors from another robot, you will probably also need to perform this step as the ids and baudrate likely won't match.
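+
+For reference, the follower class declares one motor per joint along with the bus id it expects. A small sketch to print that mapping (the port and id below are placeholders):
+
+```python
+from lerobot.robots.so101_follower import SO101Follower, SO101FollowerConfig
+
+config = SO101FollowerConfig(
+    port="/dev/tty.usbmodem585A0076841",  # placeholder port
+    id="my_awesome_follower_arm",
+)
+follower = SO101Follower(config)
+
+# Each joint name maps to the bus id that setup_motors() writes to the corresponding motor.
+for name, motor in follower.bus.motors.items():
+    print(f"{name}: id={motor.id}")
+# shoulder_pan: id=1 ... gripper: id=6
+```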
+
+The video below shows the sequence of steps for setting the motor ids.
+
+##### Setup motors video
+
+
+
+
+
+#### Follower
+
+Connect the USB cable from your computer and the power supply to the follower arm's controller board. Then, run the following command or the API example with the port you found in the previous step. You'll also need to give your follower arm a name with the `id` parameter.
+
+
+
+
+```bash
+python -m lerobot.setup_motors \
+ --robot.type=so101_follower \
+ --robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step
+```
+
+
+
+
+
+```python
+from lerobot.robots.so101_follower import SO101Follower, SO101FollowerConfig
+
+config = SO101FollowerConfig(
+ port="/dev/tty.usbmodem585A0076841",
+ id="my_awesome_follower_arm",
+)
+follower = SO101Follower(config)
+follower.setup_motors()
+```
+
+
+
+
+
+You should see the following instruction:
+
+```bash
+Connect the controller board to the 'gripper' motor only and press enter.
+```
+
+As instructed, plug in the gripper's motor. Make sure it's the only motor connected to the board, and that the motor itself is not yet daisy-chained to any other motor. As you press `[Enter]`, the script will automatically set the id and baudrate for that motor.
+
+
+Troubleshooting
+
+If you get an error at that point, check your cables and make sure they are plugged in properly:
+
+
+
+- Power supply
+- USB cable between your computer and the controller board
+- The 3-pin cable from the controller board to the motor
+
+If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
+
+
+
+You should then see the following message:
+
+```bash
+'gripper' motor id set to 6
+```
+
+Followed by the next instruction:
+
+```bash
+Connect the controller board to the 'wrist_roll' motor only and press enter.
+```
+
+You can disconnect the 3-pin cable from the controller board, but you can leave it connected to the gripper motor on the other end, as it will already be in the right place. Now, plug in another 3-pin cable to the wrist roll motor and connect it to the controller board. As with the previous motor, make sure it is the only motor connected to the board and that the motor itself isn't connected to any other one.
+
+Repeat the operation for each motor as instructed.
+
+> [!TIP]
+> Check your cabling at each step before pressing Enter. For instance, the power supply cable might disconnect as you manipulate the board.
+
+When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm.
+
+#### Leader
+
+Do the same steps for the leader arm.
+
+
+
+
+```bash
+python -m lerobot.setup_motors \
+ --teleop.type=so101_leader \
+ --teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
+```
+
+
+
+
+
+```python
+from lerobot.teleoperators.so101_leader import SO101Leader, SO101LeaderConfig
+
+config = SO101LeaderConfig(
+ port="/dev/tty.usbmodem585A0076841",
+ id="my_awesome_leader_arm",
+)
+leader = SO101Leader(config)
+leader.setup_motors()
+```
+
+
+
+
+
+## Calibrate
+
+Next, you'll need to calibrate your robot to ensure that the leader and follower arms have the same position values when they are in the same physical position.
+The calibration process is very important because it allows a neural network trained on one robot to work on another.
+
+#### Follower
+
+Run the following command or API example to calibrate the follower arm:
+
+
+
+
+```bash
+python -m lerobot.calibrate \
+ --robot.type=so101_follower \
+    --robot.port=/dev/tty.usbmodem58760431551 \
+    --robot.id=my_awesome_follower_arm # <- The port of your robot and a unique name for it
+```
+
+
+
+
+
+```python
+from lerobot.robots.so101_follower import SO101FollowerConfig, SO101Follower
+
+config = SO101FollowerConfig(
+ port="/dev/tty.usbmodem585A0076891",
+ id="my_awesome_follower_arm",
+)
+
+follower = SO101Follower(config)
+follower.connect(calibrate=False)
+follower.calibrate()
+follower.disconnect()
+```
+
+
+
+
+
+The video below shows how to perform the calibration. First, move the robot to the position where all joints are in the middle of their ranges. Then, after pressing `Enter`, move each joint through its full range of motion.
+
+##### Calibration video
+
+
+
+
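+The calibration is saved to a file tied to the arm's `id`, so reconnecting later with the same `id` should reuse it and `connect()` will not ask you to recalibrate. A minimal sketch, assuming the arm was already calibrated as above:
+
+```python
+from lerobot.robots.so101_follower import SO101Follower, SO101FollowerConfig
+
+config = SO101FollowerConfig(
+    port="/dev/tty.usbmodem58760431551",
+    id="my_awesome_follower_arm",  # same id as during calibration
+)
+follower = SO101Follower(config)
+follower.connect()  # calibrate=True by default, but skipped when a saved calibration exists
+print(follower.is_calibrated)
+follower.disconnect()
+```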
+
+#### Leader
+
+Do the same steps to calibrate the leader arm, run the following command or API example:
+
+
+
+
+```bash
+python -m lerobot.calibrate \
+ --teleop.type=so101_leader \
+    --teleop.port=/dev/tty.usbmodem58760431551 \
+    --teleop.id=my_awesome_leader_arm # <- The port of your leader arm and a unique name for it
+```
+
+
+
+
+
+```python
+from lerobot.teleoperators.so101_leader import SO101LeaderConfig, SO101Leader
+
+config = SO101LeaderConfig(
+ port="/dev/tty.usbmodem58760431551",
+ id="my_awesome_leader_arm",
+)
+
+leader = SO101Leader(config)
+leader.connect(calibrate=False)
+leader.calibrate()
+leader.disconnect()
+```
+
+
+
+
+
+Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)
+
+> [!TIP]
+> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
diff --git a/src/lerobot/robots/so101_follower/so101_follower.py b/src/lerobot/robots/so101_follower/so101_follower.py
new file mode 100644
index 0000000000..3ae3c3967d
--- /dev/null
+++ b/src/lerobot/robots/so101_follower/so101_follower.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from typing import Any
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.feetech import (
+ FeetechMotorsBus,
+ OperatingMode,
+)
+
+from ..robot import Robot
+from ..utils import ensure_safe_goal_position
+from .config_so101_follower import SO101FollowerConfig
+
+logger = logging.getLogger(__name__)
+
+
+class SO101Follower(Robot):
+ """
+ SO-101 Follower Arm designed by TheRobotStudio and Hugging Face.
+ """
+
+ config_class = SO101FollowerConfig
+ name = "so101_follower"
+
+ def __init__(self, config: SO101FollowerConfig):
+ super().__init__(config)
+ self.config = config
+ norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ "shoulder_pan": Motor(1, "sts3215", norm_mode_body),
+ "shoulder_lift": Motor(2, "sts3215", norm_mode_body),
+ "elbow_flex": Motor(3, "sts3215", norm_mode_body),
+ "wrist_flex": Motor(4, "sts3215", norm_mode_body),
+ "wrist_roll": Motor(5, "sts3215", norm_mode_body),
+ "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
+ },
+ calibration=self.calibration,
+ )
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ @property
+ def _motors_ft(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._motors_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._motors_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+
+ def connect(self, calibrate: bool = True) -> None:
+ """
+ We assume that at connection time, arm is in a rest position,
+ and torque can be safely disabled to run calibration.
+ """
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ for cam in self.cameras.values():
+ cam.connect()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ logger.info(f"\nRunning calibration of {self}")
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+
+ input(f"Move {self} to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ print(
+ "Move all joints sequentially through their entire ranges "
+ "of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion()
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=0,
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ print("Calibration saved to", self.calibration_fpath)
+
+ def configure(self) -> None:
+ with self.bus.torque_disabled():
+ self.bus.configure_motors()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+ # Set P_Coefficient to lower value to avoid shakiness (Default is 32)
+ self.bus.write("P_Coefficient", motor, 16)
+ # Set I_Coefficient and D_Coefficient to default value 0 and 32
+ self.bus.write("I_Coefficient", motor, 0)
+ self.bus.write("D_Coefficient", motor, 32)
+
+ def setup_motors(self) -> None:
+ for motor in reversed(self.bus.motors):
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ def get_observation(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ # Read arm position
+ start = time.perf_counter()
+ obs_dict = self.bus.sync_read("Present_Position")
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+ """Command arm to move to a target joint configuration.
+
+ The relative action magnitude may be clipped depending on the configuration parameter
+ `max_relative_target`. In this case, the action sent differs from original action.
+ Thus, this function always returns the action actually sent.
+
+ Raises:
+ RobotDeviceNotConnectedError: if robot is not connected.
+
+ Returns:
+ the action sent to the motors, potentially clipped.
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
+
+ # Cap goal position when too far away from present position.
+ # /!\ Slower fps expected due to reading from the follower.
+ if self.config.max_relative_target is not None:
+ present_pos = self.bus.sync_read("Present_Position")
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
+ goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
+
+ # Send goal position to the arm
+ self.bus.sync_write("Goal_Position", goal_pos)
+ return {f"{motor}.pos": val for motor, val in goal_pos.items()}
+
+ def disconnect(self):
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
+ for cam in self.cameras.values():
+ cam.disconnect()
+
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/robots/stretch3/README.md b/src/lerobot/robots/stretch3/README.md
new file mode 100644
index 0000000000..7247322863
--- /dev/null
+++ b/src/lerobot/robots/stretch3/README.md
@@ -0,0 +1,177 @@
+This tutorial explains how to use [Stretch 3](https://hello-robot.com/stretch-3-product) with LeRobot.
+
+## Setup
+
+Familiarize yourself with Stretch by following its [tutorials](https://docs.hello-robot.com/0.3/getting_started/hello_robot/) (recommended).
+
+To use LeRobot on Stretch, 3 options are available:
+
+- [tethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup)
+- [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup)
+- ssh directly into Stretch (you will first need to install and configure openssh-server on Stretch using one of the two above setups)
+
+## Install LeRobot
+
+On Stretch's CLI, follow these steps:
+
+1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
+
+```bash
+mkdir -p ~/miniconda3
+wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
+bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
+rm ~/miniconda3/miniconda.sh
+~/miniconda3/bin/conda init bash
+```
+
+2. Comment out these lines in `~/.profile` (this can mess up paths used by conda and ~/.local/bin should already be in your PATH)
+
+```
+# set PATH so it includes user's private bin if it exists
+if [ -d "$HOME/.local/bin" ] ; then
+ PATH="$HOME/.local/bin:$PATH"
+fi
+```
+
+3. Restart shell or `source ~/.bashrc`
+
+4. Create and activate a fresh conda environment for lerobot
+
+```bash
+conda create -y -n lerobot python=3.10 && conda activate lerobot
+```
+
+5. Clone LeRobot:
+
+```bash
+git clone https://github.com/huggingface/lerobot.git ~/lerobot
+```
+
+6. When using `miniconda`, install `ffmpeg` in your environment:
+
+```bash
+conda install ffmpeg -c conda-forge
+```
+
+7. Install LeRobot with stretch dependencies:
+
+```bash
+cd ~/lerobot && pip install -e ".[stretch]"
+```
+
+> **Note:** If you get this message, you can ignore it: `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.`
+
+8. Run a [system check](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#system-check) to make sure your robot is ready:
+
+```bash
+stretch_system_check.py
+```
+
+> **Note:** You may need to free the "robot process" after booting Stretch by running `stretch_free_robot_process.py`. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#turning-off-gamepad-teleoperation).
+
+You should get something like this:
+
+```bash
+For use with S T R E T C H (R) from Hello Robot Inc.
+---------------------------------------------------------------------
+
+Model = Stretch 3
+Tool = DexWrist 3 w/ Gripper
+Serial Number = stretch-se3-3054
+
+---- Checking Hardware ----
+[Pass] Comms are ready
+[Pass] Actuators are ready
+[Warn] Sensors not ready (IMU AZ = -10.19 out of range -10.1 to -9.5)
+[Pass] Battery voltage is 13.6 V
+
+---- Checking Software ----
+[Pass] Ubuntu 22.04 is ready
+[Pass] All APT pkgs are setup correctly
+[Pass] Firmware is up-to-date
+[Pass] Python pkgs are up-to-date
+[Pass] ROS2 Humble is ready
+```
+
+## Teleoperate, record a dataset and run a policy
+
+**Calibrate (Optional)**
+Before operating Stretch, you need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it first. Be mindful about giving Stretch some space as this procedure will move the robot's arm and gripper. Now run this command:
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=stretch \
+ --control.type=calibrate
+```
+
+This is equivalent to running `stretch_robot_home.py`.
+
+> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
+
+**Teleoperate**
+Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
+
+Now try out teleoperation (see above documentation to learn about the gamepad controls):
+
+> **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=stretch \
+ --control.type=teleoperate
+```
+
+This is essentially the same as running `stretch_gamepad_teleop.py`.
+
+**Record a dataset**
+Once you're familiar with the gamepad controls and after a bit of practice, you can try to record your first dataset with Stretch.
+
+If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
+
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Store your Hugging Face repository name in a variable to run these commands:
+
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Record one episode:
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=stretch \
+ --control.type=record \
+ --control.fps=30 \
+ --control.single_task="Grasp a lego block and put it in the bin." \
+ --control.repo_id=${HF_USER}/stretch_test \
+ --control.tags='["tutorial"]' \
+ --control.warmup_time_s=5 \
+ --control.episode_time_s=30 \
+ --control.reset_time_s=30 \
+ --control.num_episodes=2 \
+ --control.push_to_hub=true
+```
+
+> **Note:** If you're using ssh to connect to Stretch and run this script, you won't be able to visualize its cameras feed (though they will still be recording). To see the cameras stream, use [tethered](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) or [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup).
+
+**Replay an episode**
+Now try to replay this episode (make sure the robot's initial position is the same):
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=stretch \
+ --control.type=replay \
+ --control.fps=30 \
+ --control.repo_id=${HF_USER}/stretch_test \
+ --control.episode=0
+```
+
+Follow the [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) to train a policy on your data and run inference on your robot. You will need to adapt the code for Stretch.
+
+> TODO(rcadene, aliberts): Add already setup environment and policy yaml configuration files
+
+If you need help, please reach out on Discord in the channel `#stretch3-mobile-arm`.
diff --git a/src/lerobot/robots/stretch3/__init__.py b/src/lerobot/robots/stretch3/__init__.py
new file mode 100644
index 0000000000..b3070bbd65
--- /dev/null
+++ b/src/lerobot/robots/stretch3/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .configuration_stretch3 import Stretch3RobotConfig
+from .robot_stretch3 import Stretch3Robot
diff --git a/src/lerobot/robots/stretch3/configuration_stretch3.py b/src/lerobot/robots/stretch3/configuration_stretch3.py
new file mode 100644
index 0000000000..9fcf8f7426
--- /dev/null
+++ b/src/lerobot/robots/stretch3/configuration_stretch3.py
@@ -0,0 +1,58 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+from lerobot.cameras.opencv import OpenCVCameraConfig
+from lerobot.cameras.realsense import RealSenseCameraConfig
+
+from ..config import RobotConfig
+
+
+@RobotConfig.register_subclass("stretch3")
+@dataclass
+class Stretch3RobotConfig(RobotConfig):
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+ # the number of motors in your follower arms.
+ max_relative_target: int | None = None
+
+ # cameras
+ cameras: dict[str, CameraConfig] = field(
+ default_factory=lambda: {
+ "navigation": OpenCVCameraConfig(
+ index_or_path="/dev/hello-nav-head-camera",
+ fps=10,
+ width=1280,
+ height=720,
+ rotation=-90,
+ ),
+ "head": RealSenseCameraConfig(
+ name="Intel RealSense D435I",
+ fps=30,
+ width=640,
+ height=480,
+ rotation=90,
+ ),
+ "wrist": RealSenseCameraConfig(
+ name="Intel RealSense D405",
+ fps=30,
+ width=640,
+ height=480,
+ ),
+ }
+ )
+
+ mock: bool = False
diff --git a/src/lerobot/robots/stretch3/robot_stretch3.py b/src/lerobot/robots/stretch3/robot_stretch3.py
new file mode 100644
index 0000000000..b907d6a3f2
--- /dev/null
+++ b/src/lerobot/robots/stretch3/robot_stretch3.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+import numpy as np
+from stretch_body.gamepad_teleop import GamePadTeleop
+from stretch_body.robot import Robot as StretchAPI
+from stretch_body.robot_params import RobotParams
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.constants import OBS_IMAGES, OBS_STATE
+from lerobot.datasets.utils import get_nested_item
+
+from ..robot import Robot
+from .configuration_stretch3 import Stretch3RobotConfig
+
+# {lerobot_keys: stretch.api.keys}
+STRETCH_MOTORS = {
+ "head_pan.pos": "head.head_pan.pos",
+ "head_tilt.pos": "head.head_tilt.pos",
+ "lift.pos": "lift.pos",
+ "arm.pos": "arm.pos",
+ "wrist_pitch.pos": "end_of_arm.wrist_pitch.pos",
+ "wrist_roll.pos": "end_of_arm.wrist_roll.pos",
+ "wrist_yaw.pos": "end_of_arm.wrist_yaw.pos",
+ "gripper.pos": "end_of_arm.stretch_gripper.pos",
+ "base_x.vel": "base.x_vel",
+ "base_y.vel": "base.y_vel",
+ "base_theta.vel": "base.theta_vel",
+}
+
+
+class Stretch3Robot(Robot):
+ """[Stretch 3](https://hello-robot.com/stretch-3-product), by Hello Robot."""
+
+ config_class = Stretch3RobotConfig
+ name = "stretch3"
+
+ def __init__(self, config: Stretch3RobotConfig):
+ raise NotImplementedError
+ super().__init__(config)
+
+ self.config = config
+ self.robot_type = self.config.type
+
+ self.api = StretchAPI()
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ self.is_connected = False
+ self.logs = {}
+
+ self.teleop = None # TODO remove
+
+ # TODO(aliberts): test this
+ RobotParams.set_logging_level("WARNING")
+ RobotParams.set_logging_formatter("brief_console_formatter")
+
+ self.state_keys = None
+ self.action_keys = None
+
+ @property
+ def observation_features(self) -> dict:
+ return {
+ "dtype": "float32",
+ "shape": (len(STRETCH_MOTORS),),
+ "names": {"motors": list(STRETCH_MOTORS)},
+ }
+
+ @property
+ def action_features(self) -> dict:
+ return self.observation_features
+
+ @property
+ def camera_features(self) -> dict[str, dict]:
+ cam_ft = {}
+ for cam_key, cam in self.cameras.items():
+ cam_ft[cam_key] = {
+ "shape": (cam.height, cam.width, cam.channels),
+ "names": ["height", "width", "channels"],
+ "info": None,
+ }
+ return cam_ft
+
+ def connect(self) -> None:
+ self.is_connected = self.api.startup()
+ if not self.is_connected:
+ print("Another process is already using Stretch. Try running 'stretch_free_robot_process.py'")
+ raise ConnectionError()
+
+ for cam in self.cameras.values():
+ cam.connect()
+ self.is_connected = self.is_connected and cam.is_connected
+
+ if not self.is_connected:
+ print("Could not connect to the cameras, check that all cameras are plugged-in.")
+ raise ConnectionError()
+
+ self.calibrate()
+
+ def calibrate(self) -> None:
+ if not self.api.is_homed():
+ self.api.home()
+
+ def _get_state(self) -> dict:
+ status = self.api.get_status()
+ return {k: get_nested_item(status, v, sep=".") for k, v in STRETCH_MOTORS.items()}
+
+ def get_observation(self) -> dict[str, np.ndarray]:
+ obs_dict = {}
+
+ # Read Stretch state
+ before_read_t = time.perf_counter()
+ state = self._get_state()
+ self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
+
+ if self.state_keys is None:
+ self.state_keys = list(state)
+
+ state = np.asarray(list(state.values()))
+ obs_dict[OBS_STATE] = state
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ before_camread_t = time.perf_counter()
+ obs_dict[f"{OBS_IMAGES}.{cam_key}"] = cam.async_read()
+ self.logs[f"read_camera_{cam_key}_dt_s"] = cam.logs["delta_timestamp_s"]
+ self.logs[f"async_read_camera_{cam_key}_dt_s"] = time.perf_counter() - before_camread_t
+
+ return obs_dict
+
+ def send_action(self, action: np.ndarray) -> np.ndarray:
+ if not self.is_connected:
+ raise ConnectionError()
+
+ if self.teleop is None:
+ self.teleop = GamePadTeleop(robot_instance=False)
+ self.teleop.startup(robot=self)
+
+ if self.action_keys is None:
+ dummy_action = self.teleop.gamepad_controller.get_state()
+ self.action_keys = list(dummy_action.keys())
+
+ action_dict = dict(zip(self.action_keys, action.tolist(), strict=True))
+
+ before_write_t = time.perf_counter()
+ self.teleop.do_motion(state=action_dict, robot=self)
+ self.push_command()
+ self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t
+
+ # TODO(aliberts): return action_sent when motion is limited
+ return action
+
+ def print_logs(self) -> None:
+ pass
+ # TODO(aliberts): move robot-specific logs logic here
+
+ def teleop_safety_stop(self) -> None:
+ if self.teleop is not None:
+ self.teleop._safety_stop(robot=self)
+
+ def disconnect(self) -> None:
+ self.api.stop()
+ if self.teleop is not None:
+ self.teleop.gamepad_controller.stop()
+ self.teleop.stop()
+
+ for cam in self.cameras.values():
+ cam.disconnect()
+
+ self.is_connected = False
diff --git a/src/lerobot/robots/utils.py b/src/lerobot/robots/utils.py
new file mode 100644
index 0000000000..7486ee499d
--- /dev/null
+++ b/src/lerobot/robots/utils.py
@@ -0,0 +1,107 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from pprint import pformat
+
+from lerobot.robots import RobotConfig
+
+from .robot import Robot
+
+
+def make_robot_from_config(config: RobotConfig) -> Robot:
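+    """Instantiate and return the concrete `Robot` subclass matching `config.type`.
+
+    Imports are done lazily inside each branch so that only the selected robot's dependencies are loaded.
+    """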
+ if config.type == "koch_follower":
+ from .koch_follower import KochFollower
+
+ return KochFollower(config)
+ elif config.type == "so100_follower":
+ from .so100_follower import SO100Follower
+
+ return SO100Follower(config)
+ elif config.type == "so100_follower_end_effector":
+ from .so100_follower import SO100FollowerEndEffector
+
+ return SO100FollowerEndEffector(config)
+ elif config.type == "so101_follower":
+ from .so101_follower import SO101Follower
+
+ return SO101Follower(config)
+ elif config.type == "lekiwi":
+ from .lekiwi import LeKiwi
+
+ return LeKiwi(config)
+ elif config.type == "stretch3":
+ from .stretch3 import Stretch3Robot
+
+ return Stretch3Robot(config)
+ elif config.type == "viperx":
+ from .viperx import ViperX
+
+ return ViperX(config)
+ elif config.type == "hope_jr_hand":
+ from .hope_jr import HopeJrHand
+
+ return HopeJrHand(config)
+ elif config.type == "hope_jr_arm":
+ from .hope_jr import HopeJrArm
+
+ return HopeJrArm(config)
+ elif config.type == "bi_so100_follower":
+ from .bi_so100_follower import BiSO100Follower
+
+ return BiSO100Follower(config)
+ elif config.type == "mock_robot":
+ from tests.mocks.mock_robot import MockRobot
+
+ return MockRobot(config)
+ else:
+ raise ValueError(config.type)
+
+
+def ensure_safe_goal_position(
+    goal_present_pos: dict[str, tuple[float, float]], max_relative_target: float | dict[str, float]
+) -> dict[str, float]:
+ """Caps relative action target magnitude for safety."""
+
+    if isinstance(max_relative_target, (int, float)):
+ diff_cap = dict.fromkeys(goal_present_pos, max_relative_target)
+ elif isinstance(max_relative_target, dict):
+ if not set(goal_present_pos) == set(max_relative_target):
+ raise ValueError("max_relative_target keys must match those of goal_present_pos.")
+ diff_cap = max_relative_target
+ else:
+ raise TypeError(max_relative_target)
+
+ warnings_dict = {}
+ safe_goal_positions = {}
+ for key, (goal_pos, present_pos) in goal_present_pos.items():
+ diff = goal_pos - present_pos
+ max_diff = diff_cap[key]
+ safe_diff = min(diff, max_diff)
+ safe_diff = max(safe_diff, -max_diff)
+ safe_goal_pos = present_pos + safe_diff
+ safe_goal_positions[key] = safe_goal_pos
+ if abs(safe_goal_pos - goal_pos) > 1e-4:
+ warnings_dict[key] = {
+ "original goal_pos": goal_pos,
+ "safe goal_pos": safe_goal_pos,
+ }
+
+ if warnings_dict:
+ logging.warning(
+ "Relative goal position magnitude had to be clamped to be safe.\n"
+ f"{pformat(warnings_dict, indent=4)}"
+ )
+
+ return safe_goal_positions
diff --git a/src/lerobot/robots/viperx/README.md b/src/lerobot/robots/viperx/README.md
new file mode 100644
index 0000000000..4e90c99c71
--- /dev/null
+++ b/src/lerobot/robots/viperx/README.md
@@ -0,0 +1,198 @@
+This tutorial explains how to use [Aloha and Aloha 2 stationary](https://www.trossenrobotics.com/aloha-stationary) with LeRobot.
+
+## Setup
+
+Follow the [documentation from Trossen Robotics](https://docs.trossenrobotics.com/aloha_docs/2.0/getting_started/stationary/hardware_setup.html) for setting up the hardware and plugging the 4 arms and 4 cameras into your computer.
+
+## Install LeRobot
+
+On your computer:
+
+1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
+
+```bash
+mkdir -p ~/miniconda3
+wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
+bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
+rm ~/miniconda3/miniconda.sh
+~/miniconda3/bin/conda init bash
+```
+
+2. Restart shell or `source ~/.bashrc`
+
+3. Create and activate a fresh conda environment for lerobot
+
+```bash
+conda create -y -n lerobot python=3.10 && conda activate lerobot
+```
+
+4. Clone LeRobot:
+
+```bash
+git clone https://github.com/huggingface/lerobot.git ~/lerobot
+```
+
+5. When using `miniconda`, install `ffmpeg` in your environment:
+
+```bash
+conda install ffmpeg -c conda-forge
+```
+
+6. Install LeRobot with dependencies for the Aloha motors (dynamixel) and cameras (intelrealsense):
+
+```bash
+cd ~/lerobot && pip install -e ".[dynamixel, intelrealsense]"
+```
+
+## Teleoperate
+
+\*\*/!\ FOR SAFETY, READ THIS /!\*\*
+Teleoperation consists of manually operating the leader arms to move the follower arms. Importantly:
+
+1. Make sure your leader arms are in the same position as the follower arms, so that the follower arms don't move too fast to match the leader arms.
+2. Our code assumes that your robot has been assembled following Trossen Robotics instructions. This allows us to skip calibration, as we use the pre-defined calibration files in `.cache/calibration/aloha_default`. If you replace a motor, make sure you follow the exact instructions from Trossen Robotics.
+
+By running the following code, you can start your first **SAFE** teleoperation:
+
+> **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=aloha \
+ --robot.max_relative_target=5 \
+ --control.type=teleoperate
+```
+
+By adding `--robot.max_relative_target=5`, we override the default value for `max_relative_target` defined in [`AlohaRobotConfig`](lerobot/robot_devices/robots/configs.py). A value of `5` limits the magnitude of each movement for safety, but makes teleoperation less smooth. When you feel confident, you can disable this limit by adding `--robot.max_relative_target=null` to the command line:
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=aloha \
+ --robot.max_relative_target=null \
+ --control.type=teleoperate
+```
+
+## Record a dataset
+
+Once you're familiar with teleoperation, you can record your first dataset with Aloha.
+
+If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
+
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Store your Hugging Face repository name in a variable to run these commands:
+
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Record 2 episodes and upload your dataset to the hub:
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=aloha \
+ --robot.max_relative_target=null \
+ --control.type=record \
+ --control.fps=30 \
+ --control.single_task="Grasp a lego block and put it in the bin." \
+ --control.repo_id=${HF_USER}/aloha_test \
+ --control.tags='["tutorial"]' \
+ --control.warmup_time_s=5 \
+ --control.episode_time_s=30 \
+ --control.reset_time_s=30 \
+ --control.num_episodes=2 \
+ --control.push_to_hub=true
+```
+
+## Visualize a dataset
+
+If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id, which is given by:
+
+```bash
+echo ${HF_USER}/aloha_test
+```
+
+If you didn't upload your dataset (i.e. you used `--control.push_to_hub=false`), you can also visualize it locally with:
+
+```bash
+python -m lerobot.scripts.visualize_dataset_html \
+ --repo-id ${HF_USER}/aloha_test
+```
+
+## Replay an episode
+
+\*\*/!\ FOR SAFETY, READ THIS /!\*\*
+Replay consists of automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast to go to the first goal positions. For safety, you might want to add `--robot.max_relative_target=5` to your command line as explained above.
+
+Now try to replay the first episode on your robot:
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=aloha \
+ --robot.max_relative_target=null \
+ --control.type=replay \
+ --control.fps=30 \
+ --control.repo_id=${HF_USER}/aloha_test \
+ --control.episode=0
+```
+
+## Train a policy
+
+To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
+
+```bash
+python -m lerobot.scripts.train \
+ --dataset.repo_id=${HF_USER}/aloha_test \
+ --policy.type=act \
+ --output_dir=outputs/train/act_aloha_test \
+ --job_name=act_aloha_test \
+ --policy.device=cuda \
+ --wandb.enable=true
+```
+
+Let's explain it:
+
+1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/aloha_test`.
+2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../src/lerobot/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot, which have been saved in your dataset.
+3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
+4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
+
+For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
+
+Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`.
+
+## Evaluate your policy
+
+You can use the `record` function from [`lerobot/scripts/control_robot.py`](../src/lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
+
+```bash
+python lerobot/scripts/control_robot.py \
+ --robot.type=aloha \
+ --control.type=record \
+ --control.fps=30 \
+ --control.single_task="Grasp a lego block and put it in the bin." \
+ --control.repo_id=${HF_USER}/eval_act_aloha_test \
+ --control.tags='["tutorial"]' \
+ --control.warmup_time_s=5 \
+ --control.episode_time_s=30 \
+ --control.reset_time_s=30 \
+ --control.num_episodes=10 \
+ --control.push_to_hub=true \
+ --control.policy.path=outputs/train/act_aloha_test/checkpoints/last/pretrained_model \
+ --control.num_image_writer_processes=1
+```
+
+As you can see, it's almost the same command as previously used to record your training dataset. A few things changed:
+
+1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_aloha_test`).
+2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_aloha_test`).
+3. We use `--control.num_image_writer_processes=1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows reaching a constant 30 fps during inference. Feel free to explore different values for `--control.num_image_writer_processes`.
+
+## More
+
+Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation.
+
+If you have any questions or need help, please reach out on Discord in the channel `#aloha-arm`.
diff --git a/src/lerobot/robots/viperx/__init__.py b/src/lerobot/robots/viperx/__init__.py
new file mode 100644
index 0000000000..bfba07fc79
--- /dev/null
+++ b/src/lerobot/robots/viperx/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_viperx import ViperXConfig
+from .viperx import ViperX
diff --git a/src/lerobot/robots/viperx/config_viperx.py b/src/lerobot/robots/viperx/config_viperx.py
new file mode 100644
index 0000000000..4922f1d185
--- /dev/null
+++ b/src/lerobot/robots/viperx/config_viperx.py
@@ -0,0 +1,45 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+
+from ..config import RobotConfig
+
+
+@RobotConfig.register_subclass("viperx")
+@dataclass
+class ViperXConfig(RobotConfig):
+ port: str # Port to connect to the arm
+
+ disable_torque_on_disconnect: bool = True
+
+ # /!\ FOR SAFETY, READ THIS /!\
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+ # the number of motors in your follower arms.
+ # For Aloha, for every goal position request, motor rotations are capped at 5 degrees by default.
+    # When you feel more confident with teleoperation or running the policy, you can extend
+    # this safety limit and even remove it by setting it to `null`.
+    # Also, everything is expected to work safely out-of-the-box, but we highly advise
+    # first teleoperating the grippers only (by commenting out the rest of the motors in the configuration),
+    # then gradually adding more motors (by uncommenting them), until you can teleoperate both arms fully.
+ max_relative_target: int | None = 5
+
+ # cameras
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
+    # Troubleshooting: If one of your IntelRealSense cameras freezes during
+    # data recording due to a bandwidth limit, you might need to plug the camera
+    # into another USB hub or PCIe card.
diff --git a/src/lerobot/robots/viperx/viperx.py b/src/lerobot/robots/viperx/viperx.py
new file mode 100644
index 0000000000..881640cd5f
--- /dev/null
+++ b/src/lerobot/robots/viperx/viperx.py
@@ -0,0 +1,233 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from typing import Any
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.constants import OBS_STATE
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.dynamixel import (
+ DynamixelMotorsBus,
+ OperatingMode,
+)
+
+from ..robot import Robot
+from ..utils import ensure_safe_goal_position
+from .config_viperx import ViperXConfig
+
+logger = logging.getLogger(__name__)
+
+
+class ViperX(Robot):
+ """
+ [ViperX](https://www.trossenrobotics.com/viperx-300) developed by Trossen Robotics
+ """
+
+ config_class = ViperXConfig
+ name = "viperx"
+
+ def __init__(
+ self,
+ config: ViperXConfig,
+ ):
+ raise NotImplementedError
+ super().__init__(config)
+ self.config = config
+ self.bus = DynamixelMotorsBus(
+ port=self.config.port,
+ motors={
+ "waist": Motor(1, "xm540-w270", MotorNormMode.RANGE_M100_100),
+ "shoulder": Motor(2, "xm540-w270", MotorNormMode.RANGE_M100_100),
+ "shoulder_shadow": Motor(3, "xm540-w270", MotorNormMode.RANGE_M100_100),
+ "elbow": Motor(4, "xm540-w270", MotorNormMode.RANGE_M100_100),
+ "elbow_shadow": Motor(5, "xm540-w270", MotorNormMode.RANGE_M100_100),
+ "forearm_roll": Motor(6, "xm540-w270", MotorNormMode.RANGE_M100_100),
+ "wrist_angle": Motor(7, "xm540-w270", MotorNormMode.RANGE_M100_100),
+ "wrist_rotate": Motor(8, "xm430-w350", MotorNormMode.RANGE_M100_100),
+ "gripper": Motor(9, "xm430-w350", MotorNormMode.RANGE_0_100),
+ },
+ )
+ self.cameras = make_cameras_from_configs(config.cameras)
+
+ @property
+ def _motors_ft(self) -> dict[str, type]:
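+        # e.g. {"waist.pos": float, "shoulder.pos": float, ..., "gripper.pos": float}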
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def _cameras_ft(self) -> dict[str, tuple]:
+ return {
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+ }
+
+ @cached_property
+ def observation_features(self) -> dict[str, type | tuple]:
+ return {**self._motors_ft, **self._cameras_ft}
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return self._motors_ft
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+
+ def connect(self, calibrate: bool = True) -> None:
+ """
+        We assume that at connection time, the arm is in a rest position,
+ and torque can be safely disabled to run calibration.
+ """
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ for cam in self.cameras.values():
+ cam.connect()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+        raise NotImplementedError  # TODO(aliberts): adapt code below (copied from koch)
+ logger.info(f"\nRunning calibration of {self}")
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+ input("Move robot to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ full_turn_motors = ["shoulder_pan", "wrist_roll"]
+ unknown_range_motors = [motor for motor in self.bus.motors if motor not in full_turn_motors]
+ print(
+ f"Move all joints except {full_turn_motors} sequentially through their entire "
+ "ranges of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+ for motor in full_turn_motors:
+ range_mins[motor] = 0
+ range_maxes[motor] = 4095
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=0,
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ logger.info(f"Calibration saved to {self.calibration_fpath}")
+
+ def configure(self) -> None:
+ with self.bus.torque_disabled():
+ self.bus.configure_motors()
+
+ # Set secondary/shadow ID for shoulder and elbow. These joints have two motors.
+ # As a result, if only one of them is required to move to a certain position,
+ # the other will follow. This is to avoid breaking the motors.
+ self.bus.write("Secondary_ID", "shoulder_shadow", 2)
+ self.bus.write("Secondary_ID", "elbow_shadow", 4)
+
+ # Set a velocity limit of 131 as advised by Trossen Robotics
+ # TODO(aliberts): remove as it's actually useless in position control
+ self.bus.write("Velocity_Limit", 131)
+
+ # Use 'extended position mode' for all motors except gripper, because in joint mode the servos
+        # can't rotate more than 360 degrees (from 0 to 4095). Also, mistakes can happen while assembling
+        # the arm: you could end up with a servo whose position is 0 or 4095 at a crucial point.
+ # See: https://emanual.robotis.com/docs/en/dxl/x/x_series/#operating-mode11
+ for motor in self.bus.motors:
+ if motor != "gripper":
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+        # Use 'current-based position control' for the gripper so that it is limited by its current limit.
+        # It can then grasp an object without forcing too much, even though its goal position is a
+        # complete grasp (both gripper fingers are commanded to close until they touch).
+ self.bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)
+
+ def get_observation(self) -> dict[str, Any]:
+ """The returned observations do not have a batch dimension."""
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ obs_dict = {}
+
+ # Read arm position
+ start = time.perf_counter()
+        obs_dict = self.bus.sync_read("Present_Position")
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+ # Capture images from cameras
+ for cam_key, cam in self.cameras.items():
+ start = time.perf_counter()
+ obs_dict[cam_key] = cam.async_read()
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+ return obs_dict
+
+ def send_action(self, action: dict[str, float]) -> dict[str, float]:
+ """Command arm to move to a target joint configuration.
+
+ The relative action magnitude may be clipped depending on the configuration parameter
+        `max_relative_target`. In this case, the action sent differs from the original action.
+ Thus, this function always returns the action actually sent.
+
+ Args:
+ action (dict[str, float]): The goal positions for the motors.
+
+ Returns:
+ dict[str, float]: The action sent to the motors, potentially clipped.
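+
+        Example (illustrative values):
+            `robot.send_action({"waist.pos": 10.0, "gripper.pos": 50.0})` sends those goal positions
+            (clipped if `max_relative_target` is set) and returns the values actually sent.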
+ """
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
+
+ # Cap goal position when too far away from present position.
+ # /!\ Slower fps expected due to reading from the follower.
+ if self.config.max_relative_target is not None:
+ present_pos = self.bus.sync_read("Present_Position")
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
+ goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
+
+ # Send goal position to the arm
+ self.bus.sync_write("Goal_Position", goal_pos)
+ return {f"{motor}.pos": val for motor, val in goal_pos.items()}
+
+ def disconnect(self):
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
+ for cam in self.cameras.values():
+ cam.disconnect()
+
+ logger.info(f"{self} disconnected.")
diff --git a/lerobot/scripts/display_sys_info.py b/src/lerobot/scripts/display_sys_info.py
similarity index 100%
rename from lerobot/scripts/display_sys_info.py
rename to src/lerobot/scripts/display_sys_info.py
diff --git a/src/lerobot/scripts/eval.py b/src/lerobot/scripts/eval.py
new file mode 100644
index 0000000000..7c5aec48a9
--- /dev/null
+++ b/src/lerobot/scripts/eval.py
@@ -0,0 +1,506 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Evaluate a policy on an environment by running rollouts and computing metrics.
+
+Usage examples:
+
+You want to evaluate a model from the hub (e.g. https://huggingface.co/lerobot/diffusion_pusht)
+for 10 episodes.
+
+```
+python -m lerobot.scripts.eval \
+ --policy.path=lerobot/diffusion_pusht \
+ --env.type=pusht \
+ --eval.batch_size=10 \
+ --eval.n_episodes=10 \
+ --use_amp=false \
+ --device=cuda
+```
+
+OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes.
+```
+python -m lerobot.scripts.eval \
+ --policy.path=outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \
+ --env.type=pusht \
+ --eval.batch_size=10 \
+ --eval.n_episodes=10 \
+ --use_amp=false \
+ --device=cuda
+```
+
+Note that in both examples, the repo/folder should contain at least `config.json` and `model.safetensors` files.
+
+You can learn about the CLI options for this script in the `EvalPipelineConfig` in lerobot/configs/eval.py
+"""
+
+import json
+import logging
+import threading
+import time
+from collections.abc import Callable
+from contextlib import nullcontext
+from copy import deepcopy
+from dataclasses import asdict
+from pathlib import Path
+from pprint import pformat
+
+import einops
+import gymnasium as gym
+import numpy as np
+import torch
+from termcolor import colored
+from torch import Tensor, nn
+from tqdm import trange
+
+from lerobot.configs import parser
+from lerobot.configs.eval import EvalPipelineConfig
+from lerobot.envs.factory import make_env
+from lerobot.envs.utils import add_envs_task, check_env_attributes_and_types, preprocess_observation
+from lerobot.policies.factory import make_policy
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.utils import get_device_from_parameters
+from lerobot.utils.io_utils import write_video
+from lerobot.utils.random_utils import set_seed
+from lerobot.utils.utils import (
+ get_safe_torch_device,
+ init_logging,
+ inside_slurm,
+)
+
+
+def rollout(
+ env: gym.vector.VectorEnv,
+ policy: PreTrainedPolicy,
+ seeds: list[int] | None = None,
+ return_observations: bool = False,
+ render_callback: Callable[[gym.vector.VectorEnv], None] | None = None,
+) -> dict:
+ """Run a batched policy rollout once through a batch of environments.
+
+ Note that all environments in the batch are run until the last environment is done. This means some
+ data will probably need to be discarded (for environments that aren't the first one to be done).
+
+ The return dictionary contains:
+ (optional) "observation": A dictionary of (batch, sequence + 1, *) tensors mapped to observation
+ keys. NOTE that this has an extra sequence element relative to the other keys in the
+ dictionary. This is because an extra observation is included for after the environment is
+ terminated or truncated.
+ "action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not
+ including the last observations).
+ "reward": A (batch, sequence) tensor of rewards received for applying the actions.
+ "success": A (batch, sequence) tensor of success conditions (the only time this can be True is upon
+ environment termination/truncation).
+ "done": A (batch, sequence) tensor of **cumulative** done conditions. For any given batch element,
+ the first True is followed by True's all the way till the end. This can be used for masking
+ extraneous elements from the sequences above.
+
+ Args:
+ env: The batch of environments.
+ policy: The policy. Must be a PyTorch nn module.
+ seeds: The environments are seeded once at the start of the rollout. If provided, this argument
+ specifies the seeds for each of the environments.
+ return_observations: Whether to include all observations in the returned rollout data. Observations
+ are returned optionally because they typically take more memory to cache. Defaults to False.
+ render_callback: Optional rendering callback to be used after the environments are reset, and after
+ every step.
+ Returns:
+ The dictionary described above.
+ """
+ assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module."
+ device = get_device_from_parameters(policy)
+
+ # Reset the policy and environments.
+ policy.reset()
+ observation, info = env.reset(seed=seeds)
+ if render_callback is not None:
+ render_callback(env)
+
+ all_observations = []
+ all_actions = []
+ all_rewards = []
+ all_successes = []
+ all_dones = []
+
+ step = 0
+ # Keep track of which environments are done.
+ done = np.array([False] * env.num_envs)
+ max_steps = env.call("_max_episode_steps")[0]
+ progbar = trange(
+ max_steps,
+ desc=f"Running rollout with at most {max_steps} steps",
+        disable=inside_slurm(),  # we don't want a progress bar when running under SLURM, since it clutters the logs
+ leave=False,
+ )
+ check_env_attributes_and_types(env)
+ while not np.all(done):
+ # Numpy array to tensor and changing dictionary keys to LeRobot policy format.
+ observation = preprocess_observation(observation)
+ if return_observations:
+ all_observations.append(deepcopy(observation))
+
+ observation = {
+ key: observation[key].to(device, non_blocking=device.type == "cuda") for key in observation
+ }
+
+ # Infer "task" from attributes of environments.
+ # TODO: works with SyncVectorEnv but not AsyncVectorEnv
+ observation = add_envs_task(env, observation)
+
+ with torch.inference_mode():
+ action = policy.select_action(observation)
+
+ # Convert to CPU / numpy.
+ action = action.to("cpu").numpy()
+ assert action.ndim == 2, "Action dimensions should be (batch, action_dim)"
+
+ # Apply the next action.
+ observation, reward, terminated, truncated, info = env.step(action)
+ if render_callback is not None:
+ render_callback(env)
+
+ # VectorEnv stores is_success in `info["final_info"][env_index]["is_success"]`. "final_info" isn't
+        # available if none of the envs finished.
+ if "final_info" in info:
+ successes = [info["is_success"] if info is not None else False for info in info["final_info"]]
+ else:
+ successes = [False] * env.num_envs
+
+ # Keep track of which environments are done so far.
+ done = terminated | truncated | done
+
+ all_actions.append(torch.from_numpy(action))
+ all_rewards.append(torch.from_numpy(reward))
+ all_dones.append(torch.from_numpy(done))
+ all_successes.append(torch.tensor(successes))
+
+ step += 1
+ running_success_rate = (
+ einops.reduce(torch.stack(all_successes, dim=1), "b n -> b", "any").numpy().mean()
+ )
+ progbar.set_postfix({"running_success_rate": f"{running_success_rate.item() * 100:.1f}%"})
+ progbar.update()
+
+ # Track the final observation.
+ if return_observations:
+ observation = preprocess_observation(observation)
+ all_observations.append(deepcopy(observation))
+
+ # Stack the sequence along the first dimension so that we have (batch, sequence, *) tensors.
+ ret = {
+ "action": torch.stack(all_actions, dim=1),
+ "reward": torch.stack(all_rewards, dim=1),
+ "success": torch.stack(all_successes, dim=1),
+ "done": torch.stack(all_dones, dim=1),
+ }
+ if return_observations:
+ stacked_observations = {}
+ for key in all_observations[0]:
+ stacked_observations[key] = torch.stack([obs[key] for obs in all_observations], dim=1)
+ ret["observation"] = stacked_observations
+
+ if hasattr(policy, "use_original_modules"):
+ policy.use_original_modules()
+
+ return ret
+
+
+def eval_policy(
+ env: gym.vector.VectorEnv,
+ policy: PreTrainedPolicy,
+ n_episodes: int,
+ max_episodes_rendered: int = 0,
+ videos_dir: Path | None = None,
+ return_episode_data: bool = False,
+ start_seed: int | None = None,
+) -> dict:
+ """
+ Args:
+ env: The batch of environments.
+ policy: The policy.
+ n_episodes: The number of episodes to evaluate.
+ max_episodes_rendered: Maximum number of episodes to render into videos.
+ videos_dir: Where to save rendered videos.
+ return_episode_data: Whether to return episode data for online training. Incorporates the data into
+ the "episodes" key of the returned dictionary.
+ start_seed: The first seed to use for the first individual rollout. For all subsequent rollouts the
+ seed is incremented by 1. If not provided, the environments are not manually seeded.
+ Returns:
+ Dictionary with metrics and data regarding the rollouts.
+ """
+ if max_episodes_rendered > 0 and not videos_dir:
+ raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.")
+
+ if not isinstance(policy, PreTrainedPolicy):
+ raise ValueError(
+ f"Policy of type 'PreTrainedPolicy' is expected, but type '{type(policy)}' was provided."
+ )
+
+ start = time.time()
+ policy.eval()
+
+ # Determine how many batched rollouts we need to get n_episodes. Note that if n_episodes is not evenly
+ # divisible by env.num_envs we end up discarding some data in the last batch.
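+    # (e.g. n_episodes=10 with env.num_envs=4 gives n_batches=3, and the 2 extra episodes are discarded.)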
+ n_batches = n_episodes // env.num_envs + int((n_episodes % env.num_envs) != 0)
+
+ # Keep track of some metrics.
+ sum_rewards = []
+ max_rewards = []
+ all_successes = []
+ all_seeds = []
+ threads = [] # for video saving threads
+ n_episodes_rendered = 0 # for saving the correct number of videos
+
+ # Callback for visualization.
+ def render_frame(env: gym.vector.VectorEnv):
+ # noqa: B023
+ if n_episodes_rendered >= max_episodes_rendered:
+ return
+ n_to_render_now = min(max_episodes_rendered - n_episodes_rendered, env.num_envs)
+ if isinstance(env, gym.vector.SyncVectorEnv):
+ ep_frames.append(np.stack([env.envs[i].render() for i in range(n_to_render_now)])) # noqa: B023
+ elif isinstance(env, gym.vector.AsyncVectorEnv):
+ # Here we must render all frames and discard any we don't need.
+ ep_frames.append(np.stack(env.call("render")[:n_to_render_now]))
+
+ if max_episodes_rendered > 0:
+ video_paths: list[str] = []
+
+ if return_episode_data:
+ episode_data: dict | None = None
+
+    # we don't want a progress bar when running under SLURM, since it clutters the logs
+ progbar = trange(n_batches, desc="Stepping through eval batches", disable=inside_slurm())
+ for batch_ix in progbar:
+ # Cache frames for rendering videos. Each item will be (b, h, w, c), and the list indexes the rollout
+ # step.
+ if max_episodes_rendered > 0:
+ ep_frames: list[np.ndarray] = []
+
+ if start_seed is None:
+ seeds = None
+ else:
+ seeds = range(
+ start_seed + (batch_ix * env.num_envs), start_seed + ((batch_ix + 1) * env.num_envs)
+ )
+ rollout_data = rollout(
+ env,
+ policy,
+ seeds=list(seeds) if seeds else None,
+ return_observations=return_episode_data,
+ render_callback=render_frame if max_episodes_rendered > 0 else None,
+ )
+
+ # Figure out where in each rollout sequence the first done condition was encountered (results after
+ # this won't be included).
+ n_steps = rollout_data["done"].shape[1]
+ # Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker.
+ done_indices = torch.argmax(rollout_data["done"].to(int), dim=1)
+
+ # Make a mask with shape (batch, n_steps) to mask out rollout data after the first done
+ # (batch-element-wise). Note the `done_indices + 1` to make sure to keep the data from the done step.
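+        # (e.g. with n_steps=5 and done_index=2, the mask row for that batch element is [1, 1, 1, 1, 0].)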
+ mask = (torch.arange(n_steps) <= einops.repeat(done_indices + 1, "b -> b s", s=n_steps)).int()
+ # Extend metrics.
+ batch_sum_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "sum")
+ sum_rewards.extend(batch_sum_rewards.tolist())
+ batch_max_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "max")
+ max_rewards.extend(batch_max_rewards.tolist())
+ batch_successes = einops.reduce((rollout_data["success"] * mask), "b n -> b", "any")
+ all_successes.extend(batch_successes.tolist())
+ if seeds:
+ all_seeds.extend(seeds)
+ else:
+ all_seeds.append(None)
+
+ # FIXME: episode_data is either None or it doesn't exist
+ if return_episode_data:
+ this_episode_data = _compile_episode_data(
+ rollout_data,
+ done_indices,
+ start_episode_index=batch_ix * env.num_envs,
+ start_data_index=(0 if episode_data is None else (episode_data["index"][-1].item() + 1)),
+ fps=env.unwrapped.metadata["render_fps"],
+ )
+ if episode_data is None:
+ episode_data = this_episode_data
+ else:
+ # Some sanity checks to make sure we are correctly compiling the data.
+ assert episode_data["episode_index"][-1] + 1 == this_episode_data["episode_index"][0]
+ assert episode_data["index"][-1] + 1 == this_episode_data["index"][0]
+ # Concatenate the episode data.
+ episode_data = {k: torch.cat([episode_data[k], this_episode_data[k]]) for k in episode_data}
+
+ # Maybe render video for visualization.
+ if max_episodes_rendered > 0 and len(ep_frames) > 0:
+ batch_stacked_frames = np.stack(ep_frames, axis=1) # (b, t, *)
+ for stacked_frames, done_index in zip(
+ batch_stacked_frames, done_indices.flatten().tolist(), strict=False
+ ):
+ if n_episodes_rendered >= max_episodes_rendered:
+ break
+
+ videos_dir.mkdir(parents=True, exist_ok=True)
+ video_path = videos_dir / f"eval_episode_{n_episodes_rendered}.mp4"
+ video_paths.append(str(video_path))
+ thread = threading.Thread(
+ target=write_video,
+ args=(
+ str(video_path),
+ stacked_frames[: done_index + 1], # + 1 to capture the last observation
+ env.unwrapped.metadata["render_fps"],
+ ),
+ )
+ thread.start()
+ threads.append(thread)
+ n_episodes_rendered += 1
+
+ progbar.set_postfix(
+ {"running_success_rate": f"{np.mean(all_successes[:n_episodes]).item() * 100:.1f}%"}
+ )
+
+ # Wait till all video rendering threads are done.
+ for thread in threads:
+ thread.join()
+
+ # Compile eval info.
+ info = {
+ "per_episode": [
+ {
+ "episode_ix": i,
+ "sum_reward": sum_reward,
+ "max_reward": max_reward,
+ "success": success,
+ "seed": seed,
+ }
+ for i, (sum_reward, max_reward, success, seed) in enumerate(
+ zip(
+ sum_rewards[:n_episodes],
+ max_rewards[:n_episodes],
+ all_successes[:n_episodes],
+ all_seeds[:n_episodes],
+ strict=True,
+ )
+ )
+ ],
+ "aggregated": {
+ "avg_sum_reward": float(np.nanmean(sum_rewards[:n_episodes])),
+ "avg_max_reward": float(np.nanmean(max_rewards[:n_episodes])),
+ "pc_success": float(np.nanmean(all_successes[:n_episodes]) * 100),
+ "eval_s": time.time() - start,
+ "eval_ep_s": (time.time() - start) / n_episodes,
+ },
+ }
+
+ if return_episode_data:
+ info["episodes"] = episode_data
+
+ if max_episodes_rendered > 0:
+ info["video_paths"] = video_paths
+
+ return info
+
+
+def _compile_episode_data(
+ rollout_data: dict, done_indices: Tensor, start_episode_index: int, start_data_index: int, fps: float
+) -> dict:
+ """Convenience function for `eval_policy(return_episode_data=True)`
+
+ Compiles all the rollout data into a Hugging Face dataset.
+
+ Similar logic is implemented when datasets are pushed to hub (see: `push_to_hub`).
+ """
+ ep_dicts = []
+ total_frames = 0
+ for ep_ix in range(rollout_data["action"].shape[0]):
+ # + 2 to include the first done frame and the last observation frame.
+ num_frames = done_indices[ep_ix].item() + 2
+ total_frames += num_frames
+
+ # Here we do `num_frames - 1` as we don't want to include the last observation frame just yet.
+ ep_dict = {
+ "action": rollout_data["action"][ep_ix, : num_frames - 1],
+ "episode_index": torch.tensor([start_episode_index + ep_ix] * (num_frames - 1)),
+ "frame_index": torch.arange(0, num_frames - 1, 1),
+ "timestamp": torch.arange(0, num_frames - 1, 1) / fps,
+ "next.done": rollout_data["done"][ep_ix, : num_frames - 1],
+ "next.success": rollout_data["success"][ep_ix, : num_frames - 1],
+ "next.reward": rollout_data["reward"][ep_ix, : num_frames - 1].type(torch.float32),
+ }
+
+ # For the last observation frame, all other keys will just be copy padded.
+ for k in ep_dict:
+ ep_dict[k] = torch.cat([ep_dict[k], ep_dict[k][-1:]])
+
+ for key in rollout_data["observation"]:
+ ep_dict[key] = rollout_data["observation"][key][ep_ix, :num_frames]
+
+ ep_dicts.append(ep_dict)
+
+ data_dict = {}
+ for key in ep_dicts[0]:
+ data_dict[key] = torch.cat([x[key] for x in ep_dicts])
+
+ data_dict["index"] = torch.arange(start_data_index, start_data_index + total_frames, 1)
+
+ return data_dict
+
+
+@parser.wrap()
+def eval_main(cfg: EvalPipelineConfig):
+ logging.info(pformat(asdict(cfg)))
+
+ # Check device is available
+ device = get_safe_torch_device(cfg.policy.device, log=True)
+
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+ set_seed(cfg.seed)
+
+ logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}")
+
+ logging.info("Making environment.")
+ env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs)
+
+ logging.info("Making policy.")
+
+ policy = make_policy(
+ cfg=cfg.policy,
+ env_cfg=cfg.env,
+ )
+ policy.eval()
+
+ with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext():
+ info = eval_policy(
+ env,
+ policy,
+ cfg.eval.n_episodes,
+ max_episodes_rendered=10,
+ videos_dir=Path(cfg.output_dir) / "videos",
+ start_seed=cfg.seed,
+ )
+ print(info["aggregated"])
+
+ # Save info
+ with open(Path(cfg.output_dir) / "eval_info.json", "w") as f:
+ json.dump(info, f, indent=2)
+
+ env.close()
+
+ logging.info("End of eval")
+
+
+if __name__ == "__main__":
+ init_logging()
+ eval_main()
diff --git a/src/lerobot/scripts/find_joint_limits.py b/src/lerobot/scripts/find_joint_limits.py
new file mode 100644
index 0000000000..f7e07514f5
--- /dev/null
+++ b/src/lerobot/scripts/find_joint_limits.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Simple script to find a robot's joint position and end-effector bounds while teleoperating it.
+
+Example:
+
+```shell
+python -m lerobot.scripts.find_joint_limits \
+ --robot.type=so100_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.id=black \
+ --teleop.type=so100_leader \
+ --teleop.port=/dev/tty.usbmodem58760431551 \
+ --teleop.id=blue
+```
+"""
+
+import time
+from dataclasses import dataclass
+
+import draccus
+import numpy as np
+
+from lerobot.model.kinematics import RobotKinematics
+from lerobot.robots import ( # noqa: F401
+ RobotConfig,
+ koch_follower,
+ make_robot_from_config,
+ so100_follower,
+)
+from lerobot.teleoperators import ( # noqa: F401
+ TeleoperatorConfig,
+ gamepad,
+ koch_leader,
+ make_teleoperator_from_config,
+ so100_leader,
+)
+from lerobot.utils.robot_utils import busy_wait
+
+
+@dataclass
+class FindJointLimitsConfig:
+ teleop: TeleoperatorConfig
+ robot: RobotConfig
+    # Duration of the teleoperation session, in seconds.
+ teleop_time_s: float = 30
+ # Display all cameras on screen
+ display_data: bool = False
+
+
+@draccus.wrap()
+def find_joint_and_ee_bounds(cfg: FindJointLimitsConfig):
+ teleop = make_teleoperator_from_config(cfg.teleop)
+ robot = make_robot_from_config(cfg.robot)
+
+ teleop.connect()
+ robot.connect()
+
+ start_episode_t = time.perf_counter()
+ robot_type = getattr(robot.config, "robot_type", "so101")
+ if "so100" in robot_type or "so101" in robot_type:
+ # Note to be compatible with the rest of the codebase,
+ # we are using the new calibration method for so101 and so100
+ robot_type = "so_new_calibration"
+ kinematics = RobotKinematics(cfg.robot.urdf_path, cfg.robot.target_frame_name)
+
+ # Initialize min/max values
+ observation = robot.get_observation()
+ joint_positions = np.array([observation[f"{key}.pos"] for key in robot.bus.motors])
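+    # The [:3, 3] slice of the forward kinematics result (a homogeneous transform) is the end-effector (x, y, z) position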
+ ee_pos = kinematics.forward_kinematics(joint_positions)[:3, 3]
+
+ max_pos = joint_positions.copy()
+ min_pos = joint_positions.copy()
+ max_ee = ee_pos.copy()
+ min_ee = ee_pos.copy()
+
+ while True:
+ action = teleop.get_action()
+ robot.send_action(action)
+
+ observation = robot.get_observation()
+ joint_positions = np.array([observation[f"{key}.pos"] for key in robot.bus.motors])
+ ee_pos = kinematics.forward_kinematics(joint_positions)[:3, 3]
+
+ # Skip initial warmup period
+ if (time.perf_counter() - start_episode_t) < 5:
+ continue
+
+ # Update min/max values
+ max_ee = np.maximum(max_ee, ee_pos)
+ min_ee = np.minimum(min_ee, ee_pos)
+ max_pos = np.maximum(max_pos, joint_positions)
+ min_pos = np.minimum(min_pos, joint_positions)
+
+ if time.perf_counter() - start_episode_t > cfg.teleop_time_s:
+ print(f"Max ee position {np.round(max_ee, 4).tolist()}")
+ print(f"Min ee position {np.round(min_ee, 4).tolist()}")
+ print(f"Max joint pos position {np.round(max_pos, 4).tolist()}")
+ print(f"Min joint pos position {np.round(min_pos, 4).tolist()}")
+ break
+
+ busy_wait(0.01)
+
+
+if __name__ == "__main__":
+ find_joint_and_ee_bounds()
diff --git a/src/lerobot/scripts/rl/actor.py b/src/lerobot/scripts/rl/actor.py
new file mode 100644
index 0000000000..1c8f9286bf
--- /dev/null
+++ b/src/lerobot/scripts/rl/actor.py
@@ -0,0 +1,702 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Actor server runner for distributed HILSerl robot policy training.
+
+This script implements the actor component of the distributed HILSerl architecture.
+It executes the policy in the robot environment, collects experience,
+and sends transitions to the learner server for policy updates.
+
+Examples of usage:
+
+- Start an actor server for real robot training with human-in-the-loop intervention:
+```bash
+python -m lerobot.scripts.rl.actor --config_path src/lerobot/configs/train_config_hilserl_so100.json
+```
+
+**NOTE**: The actor server requires a running learner server to connect to. Ensure the learner
+server is started before launching the actor.
+
+**NOTE**: Human intervention is key to HILSerl training. Press the upper right trigger button on the
+gamepad to take control of the robot during training. Initially intervene frequently, then gradually
+reduce interventions as the policy improves.
+
+**WORKFLOW**:
+1. Determine robot workspace bounds using `find_joint_limits.py`
+2. Record demonstrations with `gym_manipulator.py` in record mode
+3. Process the dataset and determine camera crops with `crop_dataset_roi.py`
+4. Start the learner server with the training configuration
+5. Start this actor server with the same configuration
+6. Use human interventions to guide policy learning
+
+For more details on the complete HILSerl training workflow, see:
+https://github.com/michel-aractingi/lerobot-hilserl-guide
+"""
+
+import logging
+import os
+import time
+from functools import lru_cache
+from queue import Empty
+
+import grpc
+import torch
+from torch import nn
+from torch.multiprocessing import Event, Queue
+
+from lerobot.cameras import opencv # noqa: F401
+from lerobot.configs import parser
+from lerobot.configs.train import TrainRLServerPipelineConfig
+from lerobot.policies.factory import make_policy
+from lerobot.policies.sac.modeling_sac import SACPolicy
+from lerobot.robots import so100_follower # noqa: F401
+from lerobot.scripts.rl.gym_manipulator import make_robot_env
+from lerobot.teleoperators import gamepad, so101_leader # noqa: F401
+from lerobot.transport import services_pb2, services_pb2_grpc
+from lerobot.transport.utils import (
+ bytes_to_state_dict,
+ grpc_channel_options,
+ python_object_to_bytes,
+ receive_bytes_in_chunks,
+ send_bytes_in_chunks,
+ transitions_to_bytes,
+)
+from lerobot.utils.process import ProcessSignalHandler
+from lerobot.utils.queue import get_last_item_from_queue
+from lerobot.utils.random_utils import set_seed
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.transition import (
+ Transition,
+ move_state_dict_to_device,
+ move_transition_to_device,
+)
+from lerobot.utils.utils import (
+ TimerManager,
+ get_safe_torch_device,
+ init_logging,
+)
+
+ACTOR_SHUTDOWN_TIMEOUT = 30
+
+
+#################################################
+# Main entry point #
+#################################################
+
+
+@parser.wrap()
+def actor_cli(cfg: TrainRLServerPipelineConfig):
+ cfg.validate()
+ display_pid = False
+ if not use_threads(cfg):
+ import torch.multiprocessing as mp
+
+ mp.set_start_method("spawn")
+ display_pid = True
+
+ # Create logs directory to ensure it exists
+ log_dir = os.path.join(cfg.output_dir, "logs")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, f"actor_{cfg.job_name}.log")
+
+ # Initialize logging with explicit log file
+ init_logging(log_file=log_file, display_pid=display_pid)
+ logging.info(f"Actor logging initialized, writing to {log_file}")
+
+ is_threaded = use_threads(cfg)
+ shutdown_event = ProcessSignalHandler(is_threaded, display_pid=display_pid).shutdown_event
+
+ learner_client, grpc_channel = learner_service_client(
+ host=cfg.policy.actor_learner_config.learner_host,
+ port=cfg.policy.actor_learner_config.learner_port,
+ )
+
+ logging.info("[ACTOR] Establishing connection with Learner")
+ if not establish_learner_connection(learner_client, shutdown_event):
+ logging.error("[ACTOR] Failed to establish connection with Learner")
+ return
+
+ if not use_threads(cfg):
+        # When using processes instead of threads, the gRPC channel cannot be shared; close it so each process opens its own.
+ grpc_channel.close()
+ grpc_channel = None
+
+ logging.info("[ACTOR] Connection with Learner established")
+
+ parameters_queue = Queue()
+ transitions_queue = Queue()
+ interactions_queue = Queue()
+
+ concurrency_entity = None
+ if use_threads(cfg):
+ from threading import Thread
+
+ concurrency_entity = Thread
+ else:
+ from multiprocessing import Process
+
+ concurrency_entity = Process
+
+ receive_policy_process = concurrency_entity(
+ target=receive_policy,
+ args=(cfg, parameters_queue, shutdown_event, grpc_channel),
+ daemon=True,
+ )
+
+ transitions_process = concurrency_entity(
+ target=send_transitions,
+ args=(cfg, transitions_queue, shutdown_event, grpc_channel),
+ daemon=True,
+ )
+
+ interactions_process = concurrency_entity(
+ target=send_interactions,
+ args=(cfg, interactions_queue, shutdown_event, grpc_channel),
+ daemon=True,
+ )
+
+ transitions_process.start()
+ interactions_process.start()
+ receive_policy_process.start()
+
+ act_with_policy(
+ cfg=cfg,
+ shutdown_event=shutdown_event,
+ parameters_queue=parameters_queue,
+ transitions_queue=transitions_queue,
+ interactions_queue=interactions_queue,
+ )
+ logging.info("[ACTOR] Policy process joined")
+
+ logging.info("[ACTOR] Closing queues")
+ transitions_queue.close()
+ interactions_queue.close()
+ parameters_queue.close()
+
+ transitions_process.join()
+ logging.info("[ACTOR] Transitions process joined")
+ interactions_process.join()
+ logging.info("[ACTOR] Interactions process joined")
+ receive_policy_process.join()
+ logging.info("[ACTOR] Receive policy process joined")
+
+ logging.info("[ACTOR] join queues")
+ transitions_queue.cancel_join_thread()
+ interactions_queue.cancel_join_thread()
+ parameters_queue.cancel_join_thread()
+
+ logging.info("[ACTOR] queues closed")
+
+
+#################################################
+# Core algorithm functions #
+#################################################
+
+
+def act_with_policy(
+ cfg: TrainRLServerPipelineConfig,
+ shutdown_event: any, # Event,
+ parameters_queue: Queue,
+ transitions_queue: Queue,
+ interactions_queue: Queue,
+):
+ """
+ Executes policy interaction within the environment.
+
+ This function rolls out the policy in the environment, collecting interaction data and pushing it to a queue for streaming to the learner.
+ Once an episode is completed, updated network parameters received from the learner are retrieved from a queue and loaded into the network.
+
+ Args:
+ cfg: Configuration settings for the interaction process.
+ shutdown_event: Event to check if the process should shutdown.
+ parameters_queue: Queue to receive updated network parameters from the learner.
+ transitions_queue: Queue to send transitions to the learner.
+ interactions_queue: Queue to send interactions to the learner.
+ """
+ # Initialize logging for multiprocessing
+ if not use_threads(cfg):
+ log_dir = os.path.join(cfg.output_dir, "logs")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, f"actor_policy_{os.getpid()}.log")
+ init_logging(log_file=log_file, display_pid=True)
+ logging.info("Actor policy process logging initialized")
+
+ logging.info("make_env online")
+
+ online_env = make_robot_env(cfg=cfg.env)
+
+ set_seed(cfg.seed)
+ device = get_safe_torch_device(cfg.policy.device, log=True)
+
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+
+ logging.info("make_policy")
+
+    ### Instantiate the policy in both the actor and learner processes.
+    ### To avoid sending a SACPolicy object over the connection, we create a policy instance
+    ### on both sides; the learner then sends updated parameters every n steps to refresh the actor's copy.
+ policy: SACPolicy = make_policy(
+ cfg=cfg.policy,
+ env_cfg=cfg.env,
+ )
+ policy = policy.eval()
+ assert isinstance(policy, nn.Module)
+
+ obs, info = online_env.reset()
+
+ # NOTE: For the moment we will solely handle the case of a single environment
+ sum_reward_episode = 0
+ list_transition_to_send_to_learner = []
+ episode_intervention = False
+ # Add counters for intervention rate calculation
+ episode_intervention_steps = 0
+ episode_total_steps = 0
+
+ policy_timer = TimerManager("Policy inference", log=False)
+
+ for interaction_step in range(cfg.policy.online_steps):
+ start_time = time.perf_counter()
+ if shutdown_event.is_set():
+ logging.info("[ACTOR] Shutting down act_with_policy")
+ return
+
+ if interaction_step >= cfg.policy.online_step_before_learning:
+ # Time policy inference and check if it meets FPS requirement
+ with policy_timer:
+ action = policy.select_action(batch=obs)
+ policy_fps = policy_timer.fps_last
+
+ log_policy_frequency_issue(policy_fps=policy_fps, cfg=cfg, interaction_step=interaction_step)
+
+ else:
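+            # Warmup phase: before `online_step_before_learning` steps, explore with random actions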
+ action = online_env.action_space.sample()
+
+ next_obs, reward, done, truncated, info = online_env.step(action)
+
+ sum_reward_episode += float(reward)
+ # Increment total steps counter for intervention rate
+ episode_total_steps += 1
+
+        # NOTE: If a human intervention occurred, we record the intervention action instead,
+        # since that is the action that was actually applied to the environment.
+        if "is_intervention" in info and info["is_intervention"]:
+            # NOTE: Demonstrations are recorded in the full action space, even though parts of it
+            # (e.g. the gripper) may be deactivated for the policy.
+ action = info["action_intervention"]
+ episode_intervention = True
+ # Increment intervention steps counter
+ episode_intervention_steps += 1
+
+ list_transition_to_send_to_learner.append(
+ Transition(
+ state=obs,
+ action=action,
+ reward=reward,
+ next_state=next_obs,
+ done=done,
+ truncated=truncated, # TODO: (azouitine) Handle truncation properly
+ complementary_info=info,
+ )
+ )
+ # assign obs to the next obs and continue the rollout
+ obs = next_obs
+
+ if done or truncated:
+ logging.info(f"[ACTOR] Global step {interaction_step}: Episode reward: {sum_reward_episode}")
+
+ update_policy_parameters(policy=policy, parameters_queue=parameters_queue, device=device)
+
+ if len(list_transition_to_send_to_learner) > 0:
+ push_transitions_to_transport_queue(
+ transitions=list_transition_to_send_to_learner,
+ transitions_queue=transitions_queue,
+ )
+ list_transition_to_send_to_learner = []
+
+ stats = get_frequency_stats(policy_timer)
+ policy_timer.reset()
+
+ # Calculate intervention rate
+ intervention_rate = 0.0
+ if episode_total_steps > 0:
+ intervention_rate = episode_intervention_steps / episode_total_steps
+
+ # Send episodic reward to the learner
+ interactions_queue.put(
+ python_object_to_bytes(
+ {
+ "Episodic reward": sum_reward_episode,
+ "Interaction step": interaction_step,
+ "Episode intervention": int(episode_intervention),
+ "Intervention rate": intervention_rate,
+ **stats,
+ }
+ )
+ )
+
+ # Reset intervention counters
+ sum_reward_episode = 0.0
+ episode_intervention = False
+ episode_intervention_steps = 0
+ episode_total_steps = 0
+ obs, info = online_env.reset()
+
+ if cfg.env.fps is not None:
+ dt_time = time.perf_counter() - start_time
+ busy_wait(1 / cfg.env.fps - dt_time)
+
+
+#################################################
+# Communication Functions - Group all gRPC/messaging functions #
+#################################################
+
+
+def establish_learner_connection(
+ stub: services_pb2_grpc.LearnerServiceStub,
+ shutdown_event: Event, # type: ignore
+ attempts: int = 30,
+):
+ """Establish a connection with the learner.
+
+ Args:
+ stub (services_pb2_grpc.LearnerServiceStub): The stub to use for the connection.
+ shutdown_event (Event): The event to check if the connection should be established.
+ attempts (int): The number of attempts to establish the connection.
+ Returns:
+ bool: True if the connection is established, False otherwise.
+ """
+ for _ in range(attempts):
+ if shutdown_event.is_set():
+ logging.info("[ACTOR] Shutting down establish_learner_connection")
+ return False
+
+ # Force a connection attempt and check state
+ try:
+ logging.info("[ACTOR] Send ready message to Learner")
+ if stub.Ready(services_pb2.Empty()) == services_pb2.Empty():
+ return True
+ except grpc.RpcError as e:
+ logging.error(f"[ACTOR] Waiting for Learner to be ready... {e}")
+ time.sleep(2)
+ return False
+
+
+@lru_cache(maxsize=1)
+def learner_service_client(
+ host: str = "127.0.0.1",
+ port: int = 50051,
+) -> tuple[services_pb2_grpc.LearnerServiceStub, grpc.Channel]:
+ """
+ Returns a client for the learner service.
+
+    gRPC uses HTTP/2, a binary protocol that multiplexes requests over a single connection,
+    so a single client is created and reused.
+ """
+
+ channel = grpc.insecure_channel(
+ f"{host}:{port}",
+ grpc_channel_options(),
+ )
+ stub = services_pb2_grpc.LearnerServiceStub(channel)
+ logging.info("[ACTOR] Learner service client created")
+ return stub, channel
+
+
+def receive_policy(
+ cfg: TrainRLServerPipelineConfig,
+ parameters_queue: Queue,
+ shutdown_event: Event, # type: ignore
+ learner_client: services_pb2_grpc.LearnerServiceStub | None = None,
+ grpc_channel: grpc.Channel | None = None,
+):
+ """Receive parameters from the learner.
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): The configuration for the actor.
+ parameters_queue (Queue): The queue to receive the parameters.
+ shutdown_event (Event): The event to check if the process should shutdown.
+ """
+ logging.info("[ACTOR] Start receiving parameters from the Learner")
+ if not use_threads(cfg):
+ # Create a process-specific log file
+ log_dir = os.path.join(cfg.output_dir, "logs")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, f"actor_receive_policy_{os.getpid()}.log")
+
+ # Initialize logging with explicit log file
+ init_logging(log_file=log_file, display_pid=True)
+ logging.info("Actor receive policy process logging initialized")
+
+ # Setup process handlers to handle shutdown signal
+ # But use shutdown event from the main process
+ _ = ProcessSignalHandler(use_threads=False, display_pid=True)
+
+ if grpc_channel is None or learner_client is None:
+ learner_client, grpc_channel = learner_service_client(
+ host=cfg.policy.actor_learner_config.learner_host,
+ port=cfg.policy.actor_learner_config.learner_port,
+ )
+
+ try:
+ iterator = learner_client.StreamParameters(services_pb2.Empty())
+ receive_bytes_in_chunks(
+ iterator,
+ parameters_queue,
+ shutdown_event,
+ log_prefix="[ACTOR] parameters",
+ )
+
+ except grpc.RpcError as e:
+ logging.error(f"[ACTOR] gRPC error: {e}")
+
+ if not use_threads(cfg):
+ grpc_channel.close()
+ logging.info("[ACTOR] Received policy loop stopped")
+
+
+def send_transitions(
+ cfg: TrainRLServerPipelineConfig,
+ transitions_queue: Queue,
+ shutdown_event: any, # Event,
+ learner_client: services_pb2_grpc.LearnerServiceStub | None = None,
+ grpc_channel: grpc.Channel | None = None,
+) -> services_pb2.Empty:
+ """
+ Sends transitions to the learner.
+
+ This function continuously retrieves messages from the queue and processes:
+
+ - Transition Data:
+ - A batch of transitions (observation, action, reward, next observation) is collected.
+ - Transitions are moved to the CPU and serialized using PyTorch.
+ - The serialized data is wrapped in a `services_pb2.Transition` message and sent to the learner.
+ """
+
+ if not use_threads(cfg):
+ # Create a process-specific log file
+ log_dir = os.path.join(cfg.output_dir, "logs")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, f"actor_transitions_{os.getpid()}.log")
+
+ # Initialize logging with explicit log file
+ init_logging(log_file=log_file, display_pid=True)
+ logging.info("Actor transitions process logging initialized")
+
+ if grpc_channel is None or learner_client is None:
+ learner_client, grpc_channel = learner_service_client(
+ host=cfg.policy.actor_learner_config.learner_host,
+ port=cfg.policy.actor_learner_config.learner_port,
+ )
+
+ try:
+ learner_client.SendTransitions(
+ transitions_stream(
+ shutdown_event, transitions_queue, cfg.policy.actor_learner_config.queue_get_timeout
+ )
+ )
+ except grpc.RpcError as e:
+ logging.error(f"[ACTOR] gRPC error: {e}")
+
+ logging.info("[ACTOR] Finished streaming transitions")
+
+ if not use_threads(cfg):
+ grpc_channel.close()
+ logging.info("[ACTOR] Transitions process stopped")
+
+
+def send_interactions(
+ cfg: TrainRLServerPipelineConfig,
+ interactions_queue: Queue,
+ shutdown_event: Event, # type: ignore
+ learner_client: services_pb2_grpc.LearnerServiceStub | None = None,
+ grpc_channel: grpc.Channel | None = None,
+) -> services_pb2.Empty:
+ """
+ Sends interactions to the learner.
+
+ This function continuously retrieves messages from the queue and processes:
+
+ - Interaction Messages:
+ - Contains useful statistics about episodic rewards and policy timings.
+ - The message is serialized using `pickle` and sent to the learner.
+ """
+
+ if not use_threads(cfg):
+ # Create a process-specific log file
+ log_dir = os.path.join(cfg.output_dir, "logs")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, f"actor_interactions_{os.getpid()}.log")
+
+ # Initialize logging with explicit log file
+ init_logging(log_file=log_file, display_pid=True)
+ logging.info("Actor interactions process logging initialized")
+
+ # Setup process handlers to handle shutdown signal
+ # But use shutdown event from the main process
+ _ = ProcessSignalHandler(use_threads=False, display_pid=True)
+
+ if grpc_channel is None or learner_client is None:
+ learner_client, grpc_channel = learner_service_client(
+ host=cfg.policy.actor_learner_config.learner_host,
+ port=cfg.policy.actor_learner_config.learner_port,
+ )
+
+ try:
+ learner_client.SendInteractions(
+ interactions_stream(
+ shutdown_event, interactions_queue, cfg.policy.actor_learner_config.queue_get_timeout
+ )
+ )
+ except grpc.RpcError as e:
+ logging.error(f"[ACTOR] gRPC error: {e}")
+
+ logging.info("[ACTOR] Finished streaming interactions")
+
+ if not use_threads(cfg):
+ grpc_channel.close()
+ logging.info("[ACTOR] Interactions process stopped")
+
+
+def transitions_stream(shutdown_event: Event, transitions_queue: Queue, timeout: float) -> services_pb2.Empty: # type: ignore
+ while not shutdown_event.is_set():
+ try:
+ message = transitions_queue.get(block=True, timeout=timeout)
+ except Empty:
+ logging.debug("[ACTOR] Transition queue is empty")
+ continue
+
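+        # Stream the serialized transition batch in chunks so a large payload is not
+        # sent as a single oversized gRPC message.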
+ yield from send_bytes_in_chunks(
+ message, services_pb2.Transition, log_prefix="[ACTOR] Send transitions"
+ )
+
+ return services_pb2.Empty()
+
+
+def interactions_stream(
+ shutdown_event: Event,
+ interactions_queue: Queue,
+ timeout: float, # type: ignore
+) -> services_pb2.Empty:
+ while not shutdown_event.is_set():
+ try:
+ message = interactions_queue.get(block=True, timeout=timeout)
+ except Empty:
+ logging.debug("[ACTOR] Interaction queue is empty")
+ continue
+
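+        # Interaction statistics are small dict payloads; they reuse the same chunked
+        # streaming helper as transitions.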
+ yield from send_bytes_in_chunks(
+ message,
+ services_pb2.InteractionMessage,
+ log_prefix="[ACTOR] Send interactions",
+ )
+
+ return services_pb2.Empty()
+
+
+#################################################
+# Policy functions #
+#################################################
+
+
+def update_policy_parameters(policy: SACPolicy, parameters_queue: Queue, device):
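+    # Drain the queue without blocking and keep only the most recent parameter snapshot,
+    # so the actor never stalls waiting for the learner.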
+ bytes_state_dict = get_last_item_from_queue(parameters_queue, block=False)
+ if bytes_state_dict is not None:
+ logging.info("[ACTOR] Load new parameters from Learner.")
+ state_dicts = bytes_to_state_dict(bytes_state_dict)
+
+ # TODO: check encoder parameter synchronization possible issues:
+ # 1. When shared_encoder=True, we're loading stale encoder params from actor's state_dict
+ # instead of the updated encoder params from critic (which is optimized separately)
+ # 2. When freeze_vision_encoder=True, we waste bandwidth sending/loading frozen params
+ # 3. Need to handle encoder params correctly for both actor and discrete_critic
+ # Potential fixes:
+ # - Send critic's encoder state when shared_encoder=True
+ # - Skip encoder params entirely when freeze_vision_encoder=True
+ # - Ensure discrete_critic gets correct encoder state (currently uses encoder_critic)
+
+ # Load actor state dict
+ actor_state_dict = move_state_dict_to_device(state_dicts["policy"], device=device)
+ policy.actor.load_state_dict(actor_state_dict)
+
+ # Load discrete critic if present
+ if hasattr(policy, "discrete_critic") and "discrete_critic" in state_dicts:
+ discrete_critic_state_dict = move_state_dict_to_device(
+ state_dicts["discrete_critic"], device=device
+ )
+ policy.discrete_critic.load_state_dict(discrete_critic_state_dict)
+ logging.info("[ACTOR] Loaded discrete critic parameters from Learner.")
+
+
+#################################################
+# Utilities functions #
+#################################################
+
+
+def push_transitions_to_transport_queue(transitions: list, transitions_queue):
+    """Move transitions to the CPU, serialize them, and push them to the transport queue.
+
+    Args:
+        transitions: List of transitions to send.
+        transitions_queue: Queue used to send the serialized transitions to the learner.
+    """
+ transition_to_send_to_learner = []
+ for transition in transitions:
+ tr = move_transition_to_device(transition=transition, device="cpu")
+ for key, value in tr["state"].items():
+ if torch.isnan(value).any():
+ logging.warning(f"Found NaN values in transition {key}")
+
+ transition_to_send_to_learner.append(tr)
+
+ transitions_queue.put(transitions_to_bytes(transition_to_send_to_learner))
+
+
+def get_frequency_stats(timer: TimerManager) -> dict[str, float]:
+ """Get the frequency statistics of the policy.
+
+ Args:
+ timer (TimerManager): The timer with collected metrics.
+
+ Returns:
+ dict[str, float]: The frequency statistics of the policy.
+ """
+ stats = {}
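+    # At least two timed inferences are required for a meaningful frequency estimate.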
+ if timer.count > 1:
+ avg_fps = timer.fps_avg
+ p90_fps = timer.fps_percentile(90)
+ logging.debug(f"[ACTOR] Average policy frame rate: {avg_fps}")
+ logging.debug(f"[ACTOR] Policy frame rate 90th percentile: {p90_fps}")
+ stats = {
+ "Policy frequency [Hz]": avg_fps,
+ "Policy frequency 90th-p [Hz]": p90_fps,
+ }
+ return stats
+
+
+def log_policy_frequency_issue(policy_fps: float, cfg: TrainRLServerPipelineConfig, interaction_step: int):
+ if policy_fps < cfg.env.fps:
+ logging.warning(
+ f"[ACTOR] Policy FPS {policy_fps:.1f} below required {cfg.env.fps} at step {interaction_step}"
+ )
+
+
+def use_threads(cfg: TrainRLServerPipelineConfig) -> bool:
+ return cfg.policy.concurrency.actor == "threads"
+
+
+if __name__ == "__main__":
+ actor_cli()
diff --git a/src/lerobot/scripts/rl/crop_dataset_roi.py b/src/lerobot/scripts/rl/crop_dataset_roi.py
new file mode 100644
index 0000000000..69904b7401
--- /dev/null
+++ b/src/lerobot/scripts/rl/crop_dataset_roi.py
@@ -0,0 +1,312 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
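+# Example invocation (a sketch: assumes the dataset exists locally or on the Hub, and that ROIs
+# are selected interactively when --crop-params-path is not provided):
+#   python crop_dataset_roi.py --repo-id <user>/<dataset> --root /path/to/dataset
+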
+import argparse
+import json
+from copy import deepcopy
+from pathlib import Path
+
+import cv2
+import torch
+import torchvision.transforms.functional as F # type: ignore # noqa: N812
+from tqdm import tqdm # type: ignore
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+
+def select_rect_roi(img):
+ """
+ Allows the user to draw a rectangular ROI on the image.
+
+ The user must click and drag to draw the rectangle.
+ - While dragging, the rectangle is dynamically drawn.
+ - On mouse button release, the rectangle is fixed.
+ - Press 'c' to confirm the selection.
+ - Press 'r' to reset the selection.
+ - Press ESC to cancel.
+
+ Returns:
+ A tuple (top, left, height, width) representing the rectangular ROI,
+ or None if no valid ROI is selected.
+ """
+ # Create a working copy of the image
+ clone = img.copy()
+ working_img = clone.copy()
+
+ roi = None # Will store the final ROI as (top, left, height, width)
+ drawing = False
+ index_x, index_y = -1, -1 # Initial click coordinates
+
+ def mouse_callback(event, x, y, flags, param):
+ nonlocal index_x, index_y, drawing, roi, working_img
+
+ if event == cv2.EVENT_LBUTTONDOWN:
+ # Start drawing: record starting coordinates
+ drawing = True
+ index_x, index_y = x, y
+
+ elif event == cv2.EVENT_MOUSEMOVE:
+ if drawing:
+ # Compute the top-left and bottom-right corners regardless of drag direction
+ top = min(index_y, y)
+ left = min(index_x, x)
+ bottom = max(index_y, y)
+ right = max(index_x, x)
+ # Show a temporary image with the current rectangle drawn
+ temp = working_img.copy()
+ cv2.rectangle(temp, (left, top), (right, bottom), (0, 255, 0), 2)
+ cv2.imshow("Select ROI", temp)
+
+ elif event == cv2.EVENT_LBUTTONUP:
+ # Finish drawing
+ drawing = False
+ top = min(index_y, y)
+ left = min(index_x, x)
+ bottom = max(index_y, y)
+ right = max(index_x, x)
+ height = bottom - top
+ width = right - left
+ roi = (top, left, height, width) # (top, left, height, width)
+ # Draw the final rectangle on the working image and display it
+ working_img = clone.copy()
+ cv2.rectangle(working_img, (left, top), (right, bottom), (0, 255, 0), 2)
+ cv2.imshow("Select ROI", working_img)
+
+ # Create the window and set the callback
+ cv2.namedWindow("Select ROI")
+ cv2.setMouseCallback("Select ROI", mouse_callback)
+ cv2.imshow("Select ROI", working_img)
+
+ print("Instructions for ROI selection:")
+ print(" - Click and drag to draw a rectangular ROI.")
+ print(" - Press 'c' to confirm the selection.")
+ print(" - Press 'r' to reset and draw again.")
+ print(" - Press ESC to cancel the selection.")
+
+ # Wait until the user confirms with 'c', resets with 'r', or cancels with ESC
+ while True:
+ key = cv2.waitKey(1) & 0xFF
+ # Confirm ROI if one has been drawn
+ if key == ord("c") and roi is not None:
+ break
+ # Reset: clear the ROI and restore the original image
+ elif key == ord("r"):
+ working_img = clone.copy()
+ roi = None
+ cv2.imshow("Select ROI", working_img)
+ # Cancel selection for this image
+ elif key == 27: # ESC key
+ roi = None
+ break
+
+ cv2.destroyWindow("Select ROI")
+ return roi
+
+
+def select_square_roi_for_images(images: dict) -> dict:
+ """
+ For each image in the provided dictionary, open a window to allow the user
+ to select a rectangular ROI. Returns a dictionary mapping each key to a tuple
+ (top, left, height, width) representing the ROI.
+
+ Parameters:
+ images (dict): Dictionary where keys are identifiers and values are OpenCV images.
+
+ Returns:
+ dict: Mapping of image keys to the selected rectangular ROI.
+ """
+ selected_rois = {}
+
+ for key, img in images.items():
+ if img is None:
+ print(f"Image for key '{key}' is None, skipping.")
+ continue
+
+ print(f"\nSelect rectangular ROI for image with key: '{key}'")
+ roi = select_rect_roi(img)
+
+ if roi is None:
+ print(f"No valid ROI selected for '{key}'.")
+ else:
+ selected_rois[key] = roi
+ print(f"ROI for '{key}': {roi}")
+
+ return selected_rois
+
+
+def get_image_from_lerobot_dataset(dataset: LeRobotDataset):
+ """
+    Read the first frame of the dataset and extract its images, to be used for ROI selection.
+ """
+ row = dataset[0]
+ image_dict = {}
+ for k in row:
+ if "image" in k:
+ image_dict[k] = deepcopy(row[k])
+ return image_dict
+
+
+def convert_lerobot_dataset_to_cropper_lerobot_dataset(
+ original_dataset: LeRobotDataset,
+ crop_params_dict: dict[str, tuple[int, int, int, int]],
+ new_repo_id: str,
+ new_dataset_root: str,
+ resize_size: tuple[int, int] = (128, 128),
+ push_to_hub: bool = False,
+ task: str = "",
+) -> LeRobotDataset:
+ """
+ Converts an existing LeRobotDataset by iterating over its episodes and frames,
+ applying cropping and resizing to image observations, and saving a new dataset
+ with the transformed data.
+
+ Args:
+ original_dataset (LeRobotDataset): The source dataset.
+ crop_params_dict (Dict[str, Tuple[int, int, int, int]]):
+ A dictionary mapping observation keys to crop parameters (top, left, height, width).
+ new_repo_id (str): Repository id for the new dataset.
+ new_dataset_root (str): The root directory where the new dataset will be written.
+ resize_size (Tuple[int, int], optional): The target size (height, width) after cropping.
+ Defaults to (128, 128).
+
+ Returns:
+ LeRobotDataset: A new LeRobotDataset where the specified image observations have been cropped
+ and resized.
+ """
+ # 1. Create a new (empty) LeRobotDataset for writing.
+ new_dataset = LeRobotDataset.create(
+ repo_id=new_repo_id,
+ fps=original_dataset.fps,
+ root=new_dataset_root,
+ robot_type=original_dataset.meta.robot_type,
+ features=original_dataset.meta.info["features"],
+ use_videos=len(original_dataset.meta.video_keys) > 0,
+ )
+
+ # Update the metadata for every image key that will be cropped:
+ # (Here we simply set the shape to be the final resize_size.)
+ for key in crop_params_dict:
+ if key in new_dataset.meta.info["features"]:
+ new_dataset.meta.info["features"][key]["shape"] = [3] + list(resize_size)
+
+ # TODO: Directly modify the mp4 video + meta info features, instead of recreating a dataset
+ prev_episode_index = 0
+ for frame_idx in tqdm(range(len(original_dataset))):
+ frame = original_dataset[frame_idx]
+
+ # Create a copy of the frame to add to the new dataset
+ new_frame = {}
+ for key, value in frame.items():
+ if key in ("task_index", "timestamp", "episode_index", "frame_index", "index", "task"):
+ continue
+ if key in ("next.done", "next.reward"):
+                # Scalar reward/done tensors need an explicit leading dimension before being stored.
+ value = value.unsqueeze(0)
+
+ if key in crop_params_dict:
+ top, left, height, width = crop_params_dict[key]
+ # Apply crop then resize.
+ cropped = F.crop(value, top, left, height, width)
+ value = F.resize(cropped, resize_size)
+ value = value.clamp(0, 1)
+ if key.startswith("complementary_info") and isinstance(value, torch.Tensor) and value.dim() == 0:
+ value = value.unsqueeze(0)
+ new_frame[key] = value
+
+ new_dataset.add_frame(new_frame, task=task)
+
+ if frame["episode_index"].item() != prev_episode_index:
+ # Save the episode
+ new_dataset.save_episode()
+ prev_episode_index = frame["episode_index"].item()
+
+ # Save the last episode
+ new_dataset.save_episode()
+
+ if push_to_hub:
+ new_dataset.push_to_hub()
+
+ return new_dataset
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Crop rectangular ROIs from a LeRobot dataset.")
+ parser.add_argument(
+ "--repo-id",
+ type=str,
+ default="lerobot",
+ help="The repository id of the LeRobot dataset to process.",
+ )
+ parser.add_argument(
+ "--root",
+ type=str,
+ default=None,
+ help="The root directory of the LeRobot dataset.",
+ )
+ parser.add_argument(
+ "--crop-params-path",
+ type=str,
+ default=None,
+ help="The path to the JSON file containing the ROIs.",
+ )
+ parser.add_argument(
+ "--push-to-hub",
+ action="store_true",
+ help="Whether to push the new dataset to the hub.",
+ )
+ parser.add_argument(
+ "--task",
+ type=str,
+ default="",
+ help="The natural language task to describe the dataset.",
+ )
+ args = parser.parse_args()
+
+ dataset = LeRobotDataset(repo_id=args.repo_id, root=args.root)
+
+ images = get_image_from_lerobot_dataset(dataset)
+ images = {k: v.cpu().permute(1, 2, 0).numpy() for k, v in images.items()}
+ images = {k: (v * 255).astype("uint8") for k, v in images.items()}
+
+ if args.crop_params_path is None:
+ rois = select_square_roi_for_images(images)
+ else:
+ with open(args.crop_params_path) as f:
+ rois = json.load(f)
+
+ # Print the selected rectangular ROIs
+ print("\nSelected Rectangular Regions of Interest (top, left, height, width):")
+ for key, roi in rois.items():
+ print(f"{key}: {roi}")
+
+ new_repo_id = args.repo_id + "_cropped_resized"
+ new_dataset_root = Path(str(dataset.root) + "_cropped_resized")
+
+ cropped_resized_dataset = convert_lerobot_dataset_to_cropper_lerobot_dataset(
+ original_dataset=dataset,
+ crop_params_dict=rois,
+ new_repo_id=new_repo_id,
+ new_dataset_root=new_dataset_root,
+ resize_size=(128, 128),
+ push_to_hub=args.push_to_hub,
+ task=args.task,
+ )
+
+ meta_dir = new_dataset_root / "meta"
+ meta_dir.mkdir(exist_ok=True)
+
+ with open(meta_dir / "crop_params.json", "w") as f:
+ json.dump(rois, f, indent=4)
diff --git a/src/lerobot/scripts/rl/eval_policy.py b/src/lerobot/scripts/rl/eval_policy.py
new file mode 100644
index 0000000000..aa97483b65
--- /dev/null
+++ b/src/lerobot/scripts/rl/eval_policy.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from lerobot.cameras import opencv # noqa: F401
+from lerobot.configs import parser
+from lerobot.configs.train import TrainRLServerPipelineConfig
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.policies.factory import make_policy
+from lerobot.robots import ( # noqa: F401
+ RobotConfig,
+ make_robot_from_config,
+ so100_follower,
+)
+from lerobot.scripts.rl.gym_manipulator import make_robot_env
+from lerobot.teleoperators import (
+ gamepad, # noqa: F401
+ so101_leader, # noqa: F401
+)
+
+logging.basicConfig(level=logging.INFO)
+
+
+def eval_policy(env, policy, n_episodes):
+ sum_reward_episode = []
+ for _ in range(n_episodes):
+ obs, _ = env.reset()
+ episode_reward = 0.0
+ while True:
+ action = policy.select_action(obs)
+ obs, reward, terminated, truncated, _ = env.step(action)
+ episode_reward += reward
+ if terminated or truncated:
+ break
+ sum_reward_episode.append(episode_reward)
+
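+    # Assuming a sparse 0/1 success reward, the mean episode reward equals the success rate.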
+    logging.info(f"Episode rewards: {sum_reward_episode}")
+    logging.info(f"Success rate: {sum(sum_reward_episode) / len(sum_reward_episode)}")
+
+
+@parser.wrap()
+def main(cfg: TrainRLServerPipelineConfig):
+ env_cfg = cfg.env
+ env = make_robot_env(env_cfg)
+ dataset_cfg = cfg.dataset
+ dataset = LeRobotDataset(repo_id=dataset_cfg.repo_id)
+ dataset_meta = dataset.meta
+
+ policy = make_policy(
+ cfg=cfg.policy,
+ # env_cfg=cfg.env,
+ ds_meta=dataset_meta,
+ )
+    policy = policy.from_pretrained(env_cfg.pretrained_policy_name_or_path)
+ policy.eval()
+
+ eval_policy(env, policy=policy, n_episodes=10)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/lerobot/scripts/rl/gym_manipulator.py b/src/lerobot/scripts/rl/gym_manipulator.py
new file mode 100644
index 0000000000..c8be6b7dd7
--- /dev/null
+++ b/src/lerobot/scripts/rl/gym_manipulator.py
@@ -0,0 +1,2263 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Robot Environment for LeRobot Manipulation Tasks
+
+This module provides a comprehensive gym-compatible environment for robot manipulation
+with support for:
+- Multiple robot types (SO100, SO101, Koch and Moss)
+- Human intervention via leader-follower control or gamepad
+- End-effector and joint space control
+- Image processing (cropping and resizing)
+
+The environment is built using a composable wrapper pattern where each wrapper
+adds specific functionality to the base RobotEnv.
+
+Example:
+ env = make_robot_env(cfg)
+ obs, info = env.reset()
+ action = policy.select_action(obs)
+ obs, reward, terminated, truncated, info = env.step(action)
+"""
+
+import logging
+import time
+from collections import deque
+from collections.abc import Sequence
+from threading import Lock
+from typing import Annotated, Any
+
+import gymnasium as gym
+import numpy as np
+import torch
+import torchvision.transforms.functional as F # noqa: N812
+
+from lerobot.cameras import opencv # noqa: F401
+from lerobot.configs import parser
+from lerobot.envs.configs import EnvConfig
+from lerobot.envs.utils import preprocess_observation
+from lerobot.model.kinematics import RobotKinematics
+from lerobot.robots import ( # noqa: F401
+ RobotConfig,
+ make_robot_from_config,
+ so100_follower,
+)
+from lerobot.teleoperators import (
+ gamepad, # noqa: F401
+ keyboard, # noqa: F401
+ make_teleoperator_from_config,
+ so101_leader, # noqa: F401
+)
+from lerobot.teleoperators.gamepad.teleop_gamepad import GamepadTeleop
+from lerobot.teleoperators.keyboard.teleop_keyboard import KeyboardEndEffectorTeleop
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.utils import log_say
+
+logging.basicConfig(level=logging.INFO)
+
+
+def reset_follower_position(robot_arm, target_position):
+ current_position_dict = robot_arm.bus.sync_read("Present_Position")
+ current_position = np.array(
+ [current_position_dict[name] for name in current_position_dict], dtype=np.float32
+ )
+ trajectory = torch.from_numpy(
+ np.linspace(current_position, target_position, 50)
+    )  # NOTE: 50 interpolation steps is an arbitrary choice
+ for pose in trajectory:
+ action_dict = dict(zip(current_position_dict, pose, strict=False))
+ robot_arm.bus.sync_write("Goal_Position", action_dict)
+ busy_wait(0.015)
+
+
+class TorchBox(gym.spaces.Box):
+ """
+ A version of gym.spaces.Box that handles PyTorch tensors.
+
+ This class extends gym.spaces.Box to work with PyTorch tensors,
+ providing compatibility between NumPy arrays and PyTorch tensors.
+ """
+
+ def __init__(
+ self,
+ low: float | Sequence[float] | np.ndarray,
+ high: float | Sequence[float] | np.ndarray,
+ shape: Sequence[int] | None = None,
+ np_dtype: np.dtype | type = np.float32,
+ torch_dtype: torch.dtype = torch.float32,
+ device: str = "cpu",
+ seed: int | np.random.Generator | None = None,
+ ) -> None:
+ """
+ Initialize the PyTorch-compatible Box space.
+
+ Args:
+ low: Lower bounds of the space.
+ high: Upper bounds of the space.
+ shape: Shape of the space. If None, inferred from low and high.
+ np_dtype: NumPy data type for internal storage.
+ torch_dtype: PyTorch data type for tensor conversion.
+ device: PyTorch device for returned tensors.
+ seed: Random seed for sampling.
+ """
+ super().__init__(low, high, shape=shape, dtype=np_dtype, seed=seed)
+ self.torch_dtype = torch_dtype
+ self.device = device
+
+ def sample(self) -> torch.Tensor:
+ """
+ Sample a random point from the space.
+
+ Returns:
+ A PyTorch tensor within the space bounds.
+ """
+ arr = super().sample()
+ return torch.as_tensor(arr, dtype=self.torch_dtype, device=self.device)
+
+ def contains(self, x: torch.Tensor) -> bool:
+ """
+ Check if a tensor is within the space bounds.
+
+ Args:
+ x: The PyTorch tensor to check.
+
+ Returns:
+ Boolean indicating whether the tensor is within bounds.
+ """
+ # Move to CPU/numpy and cast to the internal dtype
+ arr = x.detach().cpu().numpy().astype(self.dtype, copy=False)
+ return super().contains(arr)
+
+ def seed(self, seed: int | np.random.Generator | None = None):
+ """
+ Set the random seed for sampling.
+
+ Args:
+ seed: The random seed to use.
+
+ Returns:
+ List containing the seed.
+ """
+ super().seed(seed)
+ return [seed]
+
+ def __repr__(self) -> str:
+ """
+ Return a string representation of the space.
+
+ Returns:
+ Formatted string with space details.
+ """
+ return (
+ f"TorchBox({self.low_repr}, {self.high_repr}, {self.shape}, "
+ f"np={self.dtype.name}, torch={self.torch_dtype}, device={self.device})"
+ )
+
+
+class TorchActionWrapper(gym.Wrapper):
+ """
+ Wrapper that changes the action space to use PyTorch tensors.
+
+ This wrapper modifies the action space to return PyTorch tensors when sampled
+ and handles converting PyTorch actions to NumPy when stepping the environment.
+ """
+
+ def __init__(self, env: gym.Env, device: str):
+ """
+ Initialize the PyTorch action space wrapper.
+
+ Args:
+ env: The environment to wrap.
+ device: The PyTorch device to use for tensor operations.
+ """
+ super().__init__(env)
+ self.action_space = TorchBox(
+ low=env.action_space.low,
+ high=env.action_space.high,
+ shape=env.action_space.shape,
+ torch_dtype=torch.float32,
+ device=torch.device("cpu"),
+ )
+
+ def step(self, action: torch.Tensor):
+ """
+ Step the environment with a PyTorch tensor action.
+
+ This method handles conversion from PyTorch tensors to NumPy arrays
+ for compatibility with the underlying environment.
+
+ Args:
+ action: PyTorch tensor action to take.
+
+ Returns:
+ Tuple of (observation, reward, terminated, truncated, info).
+ """
+ if action.dim() == 2:
+ action = action.squeeze(0)
+ action = action.detach().cpu().numpy()
+ return self.env.step(action)
+
+
+class RobotEnv(gym.Env):
+ """
+ Gym-compatible environment for evaluating robotic control policies with integrated human intervention.
+
+ This environment wraps a robot interface to provide a consistent API for policy evaluation. It supports both relative (delta)
+ and absolute joint position commands and automatically configures its observation and action spaces based on the robot's
+    This environment wraps a robot interface to provide a consistent API for policy evaluation. Actions are
+    end-effector delta commands (with an optional gripper command), and the observation and action spaces are
+    configured automatically from the robot's sensors and configuration.
+ def __init__(
+ self,
+ robot,
+ use_gripper: bool = False,
+ display_cameras: bool = False,
+ ):
+ """
+ Initialize the RobotEnv environment.
+
+        The environment is set up with a robot interface, which is used to capture observations and send
+        commands to the robot.
+
+ Args:
+ robot: The robot interface object used to connect and interact with the physical robot.
+ display_cameras: If True, the robot's camera feeds will be displayed during execution.
+ """
+ super().__init__()
+
+ self.robot = robot
+ self.display_cameras = display_cameras
+
+ # Connect to the robot if not already connected.
+ if not self.robot.is_connected:
+ self.robot.connect()
+
+ # Episode tracking.
+ self.current_step = 0
+ self.episode_data = None
+
+ self._joint_names = [f"{key}.pos" for key in self.robot.bus.motors]
+ self._image_keys = self.robot.cameras.keys()
+
+ self.current_observation = None
+
+ self.use_gripper = use_gripper
+
+ self._setup_spaces()
+
+    def _get_observation(self) -> None:
+        """Read the latest robot observation and cache it in `self.current_observation`."""
+ obs_dict = self.robot.get_observation()
+ joint_positions = np.array([obs_dict[name] for name in self._joint_names])
+
+ images = {key: obs_dict[key] for key in self._image_keys}
+ self.current_observation = {"agent_pos": joint_positions, "pixels": images}
+
+ def _setup_spaces(self):
+ """
+ Dynamically configure the observation and action spaces based on the robot's capabilities.
+
+ Observation Space:
+ - For keys with "image": A Box space with pixel values ranging from 0 to 255.
+ - For non-image keys: A nested Dict space is created under 'observation.state' with a suitable range.
+
+ Action Space:
+        - A Box space of end-effector delta commands (x, y, z), with an optional gripper
+          command appended when `use_gripper` is enabled.
+ """
+ self._get_observation()
+
+ observation_spaces = {}
+
+ # Define observation spaces for images and other states.
+ if "pixels" in self.current_observation:
+ prefix = "observation.images"
+ observation_spaces = {
+ f"{prefix}.{key}": gym.spaces.Box(
+ low=0, high=255, shape=self.current_observation["pixels"][key].shape, dtype=np.uint8
+ )
+ for key in self.current_observation["pixels"]
+ }
+
+ observation_spaces["observation.state"] = gym.spaces.Box(
+ low=0,
+ high=10,
+ shape=self.current_observation["agent_pos"].shape,
+ dtype=np.float32,
+ )
+
+ self.observation_space = gym.spaces.Dict(observation_spaces)
+
+        # Define the action space: three end-effector deltas, plus an optional gripper command.
+ action_dim = 3
+ bounds = {}
+ bounds["min"] = -np.ones(action_dim)
+ bounds["max"] = np.ones(action_dim)
+
+ if self.use_gripper:
+ action_dim += 1
+ bounds["min"] = np.concatenate([bounds["min"], [0]])
+ bounds["max"] = np.concatenate([bounds["max"], [2]])
+
+ self.action_space = gym.spaces.Box(
+ low=bounds["min"],
+ high=bounds["max"],
+ shape=(action_dim,),
+ dtype=np.float32,
+ )
+
+ def reset(self, seed=None, options=None) -> tuple[dict[str, np.ndarray], dict[str, Any]]:
+ """
+ Reset the environment to its initial state.
+ This method resets the step counter and clears any episodic data.
+
+ Args:
+ seed: A seed for random number generation to ensure reproducibility.
+ options: Additional options to influence the reset behavior.
+
+ Returns:
+ A tuple containing:
+ - observation (dict): The initial sensor observation.
+ - info (dict): A dictionary with supplementary information, including the key "is_intervention".
+ """
+ super().reset(seed=seed, options=options)
+
+ self.robot.reset()
+
+ # Reset episode tracking variables.
+ self.current_step = 0
+ self.episode_data = None
+ self.current_observation = None
+ self._get_observation()
+ return self.current_observation, {"is_intervention": False}
+
+ def step(self, action) -> tuple[dict[str, np.ndarray], float, bool, bool, dict[str, Any]]:
+ """
+ Execute a single step within the environment using the specified action.
+
+        The provided action is interpreted as end-effector deltas (and an optional gripper
+        command) and forwarded to the robot interface.
+
+        Args:
+            action: The commanded end-effector deltas (and optional gripper command) as a numpy array or torch tensor.
+
+ Returns:
+ A tuple containing:
+ - observation (dict): The new sensor observation after taking the step.
+ - reward (float): The step reward (default is 0.0 within this wrapper).
+ - terminated (bool): True if the episode has reached a terminal state.
+ - truncated (bool): True if the episode was truncated (e.g., time constraints).
+ - info (dict): Additional debugging information including intervention status.
+ """
+ action_dict = {"delta_x": action[0], "delta_y": action[1], "delta_z": action[2]}
+
+ # 1.0 action corresponds to no-op action
+ action_dict["gripper"] = action[3] if self.use_gripper else 1.0
+
+ self.robot.send_action(action_dict)
+
+ self._get_observation()
+
+ if self.display_cameras:
+ self.render()
+
+ self.current_step += 1
+
+ reward = 0.0
+ terminated = False
+ truncated = False
+
+ return (
+ self.current_observation,
+ reward,
+ terminated,
+ truncated,
+ {"is_intervention": False},
+ )
+
+ def render(self):
+ """
+ Render the current state of the environment by displaying the robot's camera feeds.
+ """
+ import cv2
+
+ image_keys = [key for key in self.current_observation if "image" in key]
+
+ for key in image_keys:
+ cv2.imshow(key, cv2.cvtColor(self.current_observation[key].numpy(), cv2.COLOR_RGB2BGR))
+ cv2.waitKey(1)
+
+ def close(self):
+ """
+ Close the environment and clean up resources by disconnecting the robot.
+
+ If the robot is currently connected, this method properly terminates the connection to ensure that all
+ associated resources are released.
+ """
+ if self.robot.is_connected:
+ self.robot.disconnect()
+
+
+class AddJointVelocityToObservation(gym.ObservationWrapper):
+ """
+ Wrapper that adds joint velocity information to the observation.
+
+ This wrapper computes joint velocities by tracking changes in joint positions over time,
+ and extends the observation space to include these velocities.
+ """
+
+ def __init__(self, env, joint_velocity_limits=100.0, fps=30, num_dof=6):
+ """
+ Initialize the joint velocity wrapper.
+
+ Args:
+ env: The environment to wrap.
+ joint_velocity_limits: Maximum expected joint velocity for space bounds.
+ fps: Frames per second used to calculate velocity (position delta / time).
+ num_dof: Number of degrees of freedom (joints) in the robot.
+ """
+ super().__init__(env)
+
+ # Extend observation space to include joint velocities
+ old_low = self.observation_space["observation.state"].low
+ old_high = self.observation_space["observation.state"].high
+ old_shape = self.observation_space["observation.state"].shape
+
+ self.last_joint_positions = np.zeros(num_dof)
+
+ new_low = np.concatenate([old_low, np.ones(num_dof) * -joint_velocity_limits])
+ new_high = np.concatenate([old_high, np.ones(num_dof) * joint_velocity_limits])
+
+ new_shape = (old_shape[0] + num_dof,)
+
+ self.observation_space["observation.state"] = gym.spaces.Box(
+ low=new_low,
+ high=new_high,
+ shape=new_shape,
+ dtype=np.float32,
+ )
+
+ self.dt = 1.0 / fps
+
+ def observation(self, observation):
+ """
+ Add joint velocity information to the observation.
+
+ Args:
+ observation: The original observation from the environment.
+
+ Returns:
+ The modified observation with joint velocities.
+ """
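+        # Finite-difference estimate: (current position - previous position) / dt.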
+ joint_velocities = (observation["agent_pos"] - self.last_joint_positions) / self.dt
+ self.last_joint_positions = observation["agent_pos"]
+ observation["agent_pos"] = np.concatenate([observation["agent_pos"], joint_velocities], axis=-1)
+ return observation
+
+
+class AddCurrentToObservation(gym.ObservationWrapper):
+ """
+ Wrapper that adds motor current information to the observation.
+
+ This wrapper extends the observation space to include the current values
+ from each motor, providing information about the forces being applied.
+ """
+
+ def __init__(self, env, max_current=500, num_dof=6):
+ """
+ Initialize the current observation wrapper.
+
+ Args:
+ env: The environment to wrap.
+ max_current: Maximum expected current for space bounds.
+ num_dof: Number of degrees of freedom (joints) in the robot.
+ """
+ super().__init__(env)
+
+        # Extend observation space to include motor currents
+ old_low = self.observation_space["observation.state"].low
+ old_high = self.observation_space["observation.state"].high
+ old_shape = self.observation_space["observation.state"].shape
+
+ new_low = np.concatenate([old_low, np.zeros(num_dof)])
+ new_high = np.concatenate([old_high, np.ones(num_dof) * max_current])
+
+ new_shape = (old_shape[0] + num_dof,)
+
+ self.observation_space["observation.state"] = gym.spaces.Box(
+ low=new_low,
+ high=new_high,
+ shape=new_shape,
+ dtype=np.float32,
+ )
+
+ def observation(self, observation):
+ """
+ Add current information to the observation.
+
+ Args:
+ observation: The original observation from the environment.
+
+ Returns:
+ The modified observation with current values.
+ """
+ present_current_dict = self.env.unwrapped.robot.bus.sync_read("Present_Current")
+ present_current_observation = np.array(
+ [present_current_dict[name] for name in self.env.unwrapped.robot.bus.motors]
+ )
+ observation["agent_pos"] = np.concatenate(
+ [observation["agent_pos"], present_current_observation], axis=-1
+ )
+ return observation
+
+
+class RewardWrapper(gym.Wrapper):
+ def __init__(self, env, reward_classifier, device="cuda"):
+ """
+ Wrapper to add reward prediction to the environment using a trained classifier.
+
+ Args:
+ env: The environment to wrap.
+ reward_classifier: The reward classifier model.
+ device: The device to run the model on.
+ """
+ self.env = env
+
+ self.device = device
+
+ self.reward_classifier = torch.compile(reward_classifier)
+ self.reward_classifier.to(self.device)
+
+ def step(self, action):
+ """
+ Execute a step and compute the reward using the classifier.
+
+ Args:
+ action: The action to take in the environment.
+
+ Returns:
+ Tuple of (observation, reward, terminated, truncated, info).
+ """
+ observation, _, terminated, truncated, info = self.env.step(action)
+
+ images = {}
+ for key in observation:
+ if "image" in key:
+ images[key] = observation[key].to(self.device, non_blocking=(self.device == "cuda"))
+ if images[key].dim() == 3:
+ images[key] = images[key].unsqueeze(0)
+
+ start_time = time.perf_counter()
+ with torch.inference_mode():
+ success = (
+ self.reward_classifier.predict_reward(images, threshold=0.7)
+ if self.reward_classifier is not None
+ else 0.0
+ )
+ info["Reward classifier frequency"] = 1 / (time.perf_counter() - start_time)
+
+ reward = 0.0
+ if success == 1.0:
+ terminated = True
+ reward = 1.0
+
+ return observation, reward, terminated, truncated, info
+
+ def reset(self, seed=None, options=None):
+ """
+ Reset the environment.
+
+ Args:
+ seed: Random seed for reproducibility.
+ options: Additional reset options.
+
+ Returns:
+ The initial observation and info from the wrapped environment.
+ """
+ return self.env.reset(seed=seed, options=options)
+
+
+class TimeLimitWrapper(gym.Wrapper):
+ """
+ Wrapper that adds a time limit to episodes and tracks execution time.
+
+ This wrapper terminates episodes after a specified time has elapsed, providing
+ better control over episode length.
+ """
+
+ def __init__(self, env, control_time_s, fps):
+ """
+ Initialize the time limit wrapper.
+
+ Args:
+ env: The environment to wrap.
+ control_time_s: Maximum episode duration in seconds.
+ fps: Frames per second for calculating the maximum number of steps.
+ """
+ self.env = env
+ self.control_time_s = control_time_s
+ self.fps = fps
+
+ self.last_timestamp = 0.0
+ self.episode_time_in_s = 0.0
+
+ self.max_episode_steps = int(self.control_time_s * self.fps)
+
+ self.current_step = 0
+
+ def step(self, action):
+ """
+ Step the environment and track time elapsed.
+
+ Args:
+ action: The action to take in the environment.
+
+ Returns:
+ Tuple of (observation, reward, terminated, truncated, info).
+ """
+ obs, reward, terminated, truncated, info = self.env.step(action)
+ time_since_last_step = time.perf_counter() - self.last_timestamp
+ self.episode_time_in_s += time_since_last_step
+ self.last_timestamp = time.perf_counter()
+ self.current_step += 1
+ # check if last timestep took more time than the expected fps
+ if 1.0 / time_since_last_step < self.fps:
+ logging.debug(f"Current timestep exceeded expected fps {self.fps}")
+
+ if self.current_step >= self.max_episode_steps:
+ terminated = True
+ return obs, reward, terminated, truncated, info
+
+ def reset(self, seed=None, options=None):
+ """
+ Reset the environment and time tracking.
+
+ Args:
+ seed: Random seed for reproducibility.
+ options: Additional reset options.
+
+ Returns:
+ The initial observation and info from the wrapped environment.
+ """
+ self.episode_time_in_s = 0.0
+ self.last_timestamp = time.perf_counter()
+ self.current_step = 0
+ return self.env.reset(seed=seed, options=options)
+
+
+class ImageCropResizeWrapper(gym.Wrapper):
+ """
+ Wrapper that crops and resizes image observations.
+
+ This wrapper processes image observations to focus on relevant regions by
+ cropping and then resizing to a standard size.
+ """
+
+ def __init__(
+ self,
+ env,
+ crop_params_dict: dict[str, Annotated[tuple[int], 4]],
+ resize_size=None,
+ ):
+ """
+ Initialize the image crop and resize wrapper.
+
+ Args:
+ env: The environment to wrap.
+ crop_params_dict: Dictionary mapping image observation keys to crop parameters
+ (top, left, height, width).
+ resize_size: Target size for resized images (height, width). Defaults to (128, 128).
+ """
+ super().__init__(env)
+ self.env = env
+ self.crop_params_dict = crop_params_dict
+ print(f"obs_keys , {self.env.observation_space}")
+ print(f"crop params dict {crop_params_dict.keys()}")
+ for key_crop in crop_params_dict:
+ if key_crop not in self.env.observation_space.keys(): # noqa: SIM118
+ raise ValueError(f"Key {key_crop} not in observation space")
+ for key in crop_params_dict:
+ new_shape = (3, resize_size[0], resize_size[1])
+ self.observation_space[key] = gym.spaces.Box(low=0, high=255, shape=new_shape)
+
+ self.resize_size = resize_size
+ if self.resize_size is None:
+ self.resize_size = (128, 128)
+
+ def step(self, action):
+ """
+ Step the environment and process image observations.
+
+ Args:
+ action: The action to take in the environment.
+
+ Returns:
+ Tuple of (observation, reward, terminated, truncated, info) with processed images.
+ """
+ obs, reward, terminated, truncated, info = self.env.step(action)
+ for k in self.crop_params_dict:
+ device = obs[k].device
+ if obs[k].dim() >= 3:
+ # Reshape to combine height and width dimensions for easier calculation
+ batch_size = obs[k].size(0)
+ channels = obs[k].size(1)
+ flattened_spatial_dims = obs[k].view(batch_size, channels, -1)
+
+ # Calculate standard deviation across spatial dimensions (H, W)
+ # If any channel has std=0, all pixels in that channel have the same value
+ # This is helpful if one camera mistakenly covered or the image is black
+ std_per_channel = torch.std(flattened_spatial_dims, dim=2)
+ if (std_per_channel <= 0.02).any():
+ logging.warning(
+ f"Potential hardware issue detected: All pixels have the same value in observation {k}"
+ )
+
+ if device == torch.device("mps:0"):
+ obs[k] = obs[k].cpu()
+
+ obs[k] = F.crop(obs[k], *self.crop_params_dict[k])
+ obs[k] = F.resize(obs[k], self.resize_size)
+ # TODO (michel-aractingi): Bug in resize, it returns values outside [0, 1]
+ obs[k] = obs[k].clamp(0.0, 1.0)
+ obs[k] = obs[k].to(device)
+
+ return obs, reward, terminated, truncated, info
+
+ def reset(self, seed=None, options=None):
+ """
+ Reset the environment and process image observations.
+
+ Args:
+ seed: Random seed for reproducibility.
+ options: Additional reset options.
+
+ Returns:
+ Tuple of (observation, info) with processed images.
+ """
+ obs, info = self.env.reset(seed=seed, options=options)
+ for k in self.crop_params_dict:
+ device = obs[k].device
+ if device == torch.device("mps:0"):
+ obs[k] = obs[k].cpu()
+ obs[k] = F.crop(obs[k], *self.crop_params_dict[k])
+ obs[k] = F.resize(obs[k], self.resize_size)
+ obs[k] = obs[k].clamp(0.0, 1.0)
+ obs[k] = obs[k].to(device)
+ return obs, info
+
+
+class ConvertToLeRobotObservation(gym.ObservationWrapper):
+ """
+ Wrapper that converts standard observations to LeRobot format.
+
+ This wrapper processes observations to match the expected format for LeRobot,
+ including normalizing image values and moving tensors to the specified device.
+ """
+
+ def __init__(self, env, device: str = "cpu"):
+ """
+ Initialize the LeRobot observation converter.
+
+ Args:
+ env: The environment to wrap.
+ device: Target device for the observation tensors.
+ """
+ super().__init__(env)
+
+ self.device = torch.device(device)
+
+ def observation(self, observation):
+ """
+ Convert observations to LeRobot format.
+
+ Args:
+ observation: The original observation from the environment.
+
+ Returns:
+ The processed observation with normalized images and proper tensor formats.
+ """
+ observation = preprocess_observation(observation)
+ observation = {
+ key: observation[key].to(self.device, non_blocking=self.device.type == "cuda")
+ for key in observation
+ }
+ return observation
+
+
+class ResetWrapper(gym.Wrapper):
+ """
+ Wrapper that handles environment reset procedures.
+
+ This wrapper provides additional functionality during environment reset,
+ including the option to reset to a fixed pose or allow manual reset.
+ """
+
+ def __init__(
+ self,
+ env: RobotEnv,
+ reset_pose: np.ndarray | None = None,
+ reset_time_s: float = 5,
+ ):
+ """
+ Initialize the reset wrapper.
+
+ Args:
+ env: The environment to wrap.
+ reset_pose: Fixed joint positions to reset to. If None, manual reset is used.
+ reset_time_s: Time in seconds to wait after reset or allowed for manual reset.
+ """
+ super().__init__(env)
+ self.reset_time_s = reset_time_s
+ self.reset_pose = reset_pose
+ self.robot = self.unwrapped.robot
+
+ def reset(self, *, seed=None, options=None):
+ """
+ Reset the environment with either fixed or manual reset procedure.
+
+ If reset_pose is provided, the robot will move to that position.
+ Otherwise, manual teleoperation control is allowed for reset_time_s seconds.
+
+ Args:
+ seed: Random seed for reproducibility.
+ options: Additional reset options.
+
+ Returns:
+ The initial observation and info from the wrapped environment.
+ """
+ start_time = time.perf_counter()
+ if self.reset_pose is not None:
+ log_say("Reset the environment.", play_sounds=True)
+ reset_follower_position(self.unwrapped.robot, self.reset_pose)
+ log_say("Reset the environment done.", play_sounds=True)
+
+ if hasattr(self.env, "robot_leader"):
+ self.env.robot_leader.bus.sync_write("Torque_Enable", 1)
+ log_say("Reset the leader robot.", play_sounds=True)
+ reset_follower_position(self.env.robot_leader, self.reset_pose)
+ log_say("Reset the leader robot done.", play_sounds=True)
+ else:
+ log_say(
+ f"Manually reset the environment for {self.reset_time_s} seconds.",
+ play_sounds=True,
+ )
+ start_time = time.perf_counter()
+ while time.perf_counter() - start_time < self.reset_time_s:
+ action = self.env.robot_leader.get_action()
+ self.unwrapped.robot.send_action(action)
+
+ log_say("Manual reset of the environment done.", play_sounds=True)
+
+ busy_wait(self.reset_time_s - (time.perf_counter() - start_time))
+
+ return super().reset(seed=seed, options=options)
+
+
+class BatchCompatibleWrapper(gym.ObservationWrapper):
+ """
+ Wrapper that ensures observations are compatible with batch processing.
+
+ This wrapper adds a batch dimension to observations that don't already have one,
+ making them compatible with models that expect batched inputs.
+ """
+
+ def __init__(self, env):
+ """
+ Initialize the batch compatibility wrapper.
+
+ Args:
+ env: The environment to wrap.
+ """
+ super().__init__(env)
+
+ def observation(self, observation: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
+ """
+ Add batch dimensions to observations if needed.
+
+ Args:
+ observation: Dictionary of observation tensors.
+
+ Returns:
+ Dictionary of observation tensors with batch dimensions.
+ """
+ for key in observation:
+ if "image" in key and observation[key].dim() == 3:
+ observation[key] = observation[key].unsqueeze(0)
+ if "state" in key and observation[key].dim() == 1:
+ observation[key] = observation[key].unsqueeze(0)
+ if "velocity" in key and observation[key].dim() == 1:
+ observation[key] = observation[key].unsqueeze(0)
+ return observation
+
+
+class GripperPenaltyWrapper(gym.RewardWrapper):
+ """
+ Wrapper that adds penalties for inefficient gripper commands.
+
+ This wrapper modifies rewards to discourage excessive gripper movement
+ or commands that attempt to move the gripper beyond its physical limits.
+ """
+
+ def __init__(self, env, penalty: float = -0.1):
+ """
+ Initialize the gripper penalty wrapper.
+
+ Args:
+ env: The environment to wrap.
+ penalty: Negative reward value to apply for inefficient gripper actions.
+ """
+ super().__init__(env)
+ self.penalty = penalty
+ self.last_gripper_state = None
+
+ def reward(self, reward, action):
+ """
+ Apply penalties to reward based on gripper actions.
+
+ Args:
+ reward: The original reward from the environment.
+ action: The action that was taken.
+
+ Returns:
+ Modified reward with penalty applied if necessary.
+ """
+ gripper_state_normalized = self.last_gripper_state / self.unwrapped.robot.config.max_gripper_pos
+
+ action_normalized = action - 1.0 # action / MAX_GRIPPER_COMMAND
+
+ gripper_penalty_bool = (gripper_state_normalized < 0.5 and action_normalized > 0.5) or (
+ gripper_state_normalized > 0.75 and action_normalized < -0.5
+ )
+
+ return reward + self.penalty * int(gripper_penalty_bool)
+
+ def step(self, action):
+ """
+ Step the environment and apply gripper penalties.
+
+ Args:
+ action: The action to take in the environment.
+
+ Returns:
+ Tuple of (observation, reward, terminated, truncated, info) with penalty applied.
+ """
+ self.last_gripper_state = self.unwrapped.robot.bus.sync_read("Present_Position")["gripper"]
+
+ gripper_action = action[-1]
+ obs, reward, terminated, truncated, info = self.env.step(action)
+ gripper_penalty = self.reward(reward, gripper_action)
+
+ info["discrete_penalty"] = gripper_penalty
+
+ return obs, reward, terminated, truncated, info
+
+ def reset(self, **kwargs):
+ """
+ Reset the environment and penalty tracking.
+
+ Args:
+ **kwargs: Keyword arguments passed to the wrapped environment's reset.
+
+ Returns:
+ The initial observation and info with gripper penalty initialized.
+ """
+ self.last_gripper_state = None
+ obs, info = super().reset(**kwargs)
+ info["gripper_penalty"] = 0.0
+ return obs, info
+
+
+class GripperActionWrapper(gym.ActionWrapper):
+ """
+ Wrapper that processes gripper control commands.
+
+ This wrapper quantizes and processes gripper commands, adding a sleep time between
+ consecutive gripper actions to prevent rapid toggling.
+ """
+
+ def __init__(self, env, quantization_threshold: float = 0.2, gripper_sleep: float = 0.0):
+ """
+ Initialize the gripper action wrapper.
+
+ Args:
+ env: The environment to wrap.
+ quantization_threshold: Threshold below which gripper commands are quantized to zero.
+ gripper_sleep: Minimum time in seconds between consecutive gripper commands.
+ """
+ super().__init__(env)
+ self.quantization_threshold = quantization_threshold
+ self.gripper_sleep = gripper_sleep
+ self.last_gripper_action_time = 0.0
+ self.last_gripper_action = None
+
+ def action(self, action):
+ """
+ Process gripper commands in the action.
+
+ Args:
+ action: The original action from the agent.
+
+ Returns:
+ Modified action with processed gripper command.
+ """
+ if self.gripper_sleep > 0.0:
+ if (
+ self.last_gripper_action is not None
+ and time.perf_counter() - self.last_gripper_action_time < self.gripper_sleep
+ ):
+ action[-1] = self.last_gripper_action
+ else:
+ self.last_gripper_action_time = time.perf_counter()
+ self.last_gripper_action = action[-1]
+
+ gripper_command = action[-1]
+        # Gripper actions arrive in the range [0, 2];
+        # shift them to [-1, 1] so they can be quantized to -1, 0 or 1.
+ gripper_command = gripper_command - 1.0
+
+ if self.quantization_threshold is not None:
+ # Quantize gripper command to -1, 0 or 1
+ gripper_command = (
+ np.sign(gripper_command) if abs(gripper_command) > self.quantization_threshold else 0.0
+ )
+ gripper_command = gripper_command * self.unwrapped.robot.config.max_gripper_pos
+
+ gripper_state = self.unwrapped.robot.bus.sync_read("Present_Position")["gripper"]
+
+ gripper_action_value = np.clip(
+ gripper_state + gripper_command, 0, self.unwrapped.robot.config.max_gripper_pos
+ )
+ action[-1] = gripper_action_value.item()
+ return action
+
+ def reset(self, **kwargs):
+ """
+ Reset the gripper action tracking.
+
+ Args:
+ **kwargs: Keyword arguments passed to the wrapped environment's reset.
+
+ Returns:
+ The initial observation and info.
+ """
+ obs, info = super().reset(**kwargs)
+ self.last_gripper_action_time = 0.0
+ self.last_gripper_action = None
+ return obs, info
+
+
+class EEObservationWrapper(gym.ObservationWrapper):
+ """
+ Wrapper that adds end-effector pose information to observations.
+
+ This wrapper computes the end-effector pose using forward kinematics
+ and adds it to the observation space.
+ """
+
+ def __init__(self, env, ee_pose_limits):
+ """
+ Initialize the end-effector observation wrapper.
+
+ Args:
+ env: The environment to wrap.
+ ee_pose_limits: Dictionary with 'min' and 'max' keys containing limits for EE pose.
+ """
+ super().__init__(env)
+
+ # Extend observation space to include end effector pose
+ prev_space = self.observation_space["observation.state"]
+
+ self.observation_space["observation.state"] = gym.spaces.Box(
+ low=np.concatenate([prev_space.low, ee_pose_limits["min"]]),
+ high=np.concatenate([prev_space.high, ee_pose_limits["max"]]),
+ shape=(prev_space.shape[0] + 3,),
+ dtype=np.float32,
+ )
+
+ self.kinematics = RobotKinematics(
+ urdf_path=env.unwrapped.robot.config.urdf_path,
+ target_frame_name=env.unwrapped.robot.config.target_frame_name,
+ )
+
+ def observation(self, observation):
+ """
+ Add end-effector pose to the observation.
+
+ Args:
+ observation: Original observation from the environment.
+
+ Returns:
+ Enhanced observation with end-effector pose information.
+ """
+ current_joint_pos = self.unwrapped.current_observation["agent_pos"]
+
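+        # The last column of the 4x4 homogeneous transform returned by forward kinematics
+        # holds the xyz translation of the end effector.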
+ current_ee_pos = self.kinematics.forward_kinematics(current_joint_pos)[:3, 3]
+ observation["agent_pos"] = np.concatenate([observation["agent_pos"], current_ee_pos], -1)
+ return observation
+
+
+###########################################################
+# Wrappers related to human intervention and input devices
+###########################################################
+
+
+class BaseLeaderControlWrapper(gym.Wrapper):
+ """
+ Base class for leader-follower robot control wrappers.
+
+ This wrapper enables human intervention through a leader-follower robot setup,
+ where the human can control a leader robot to guide the follower robot's movements.
+ """
+
+ def __init__(
+ self,
+ env,
+ teleop_device,
+ end_effector_step_sizes,
+ use_geared_leader_arm: bool = False,
+ use_gripper=False,
+ ):
+ """
+ Initialize the base leader control wrapper.
+
+ Args:
+ env: The environment to wrap.
+            teleop_device: The teleoperation device.
+            end_effector_step_sizes: Per-axis maximum end-effector step sizes (x, y, z).
+ use_geared_leader_arm: Whether to use a geared leader arm setup.
+ use_gripper: Whether to include gripper control.
+ """
+ super().__init__(env)
+ self.robot_leader = teleop_device
+ self.robot_follower = env.unwrapped.robot
+ self.use_geared_leader_arm = use_geared_leader_arm
+ self.use_gripper: bool = use_gripper
+ self.end_effector_step_sizes = np.array(list(end_effector_step_sizes.values()))
+
+ # Set up keyboard event tracking
+ self._init_keyboard_events()
+ self.event_lock = Lock() # Thread-safe access to events
+
+ # Initialize robot control
+ self.kinematics = RobotKinematics(
+ urdf_path=env.unwrapped.robot.config.urdf_path,
+ target_frame_name=env.unwrapped.robot.config.target_frame_name,
+ )
+ self.leader_torque_enabled = True
+ self.prev_leader_gripper = None
+
+ # Configure leader arm
+        # NOTE: Lower the gains of the leader arm to allow automatic take-over.
+        # With lower gains we can manually move the leader arm without risk of injury to ourselves or the robot.
+        # With higher gains, it would be dangerous and difficult to modify the leader's pose while torque is enabled.
+        # The default value for P_Coefficient is 32.
+ self.robot_leader.bus.sync_write("Torque_Enable", 1)
+ for motor in self.robot_leader.bus.motors:
+ self.robot_leader.bus.write("P_Coefficient", motor, 16)
+ self.robot_leader.bus.write("I_Coefficient", motor, 0)
+ self.robot_leader.bus.write("D_Coefficient", motor, 16)
+
+ self.leader_tracking_error_queue = deque(maxlen=4)
+ self._init_keyboard_listener()
+
+ def _init_keyboard_events(self):
+ """
+ Initialize the keyboard events dictionary.
+
+ This method sets up tracking for keyboard events used for intervention control.
+ It should be overridden in subclasses to add additional events.
+ """
+ self.keyboard_events = {
+ "episode_success": False,
+ "episode_end": False,
+ "rerecord_episode": False,
+ }
+
+ def _handle_key_press(self, key, keyboard_device):
+ """
+ Handle key press events.
+
+ Args:
+ key: The key that was pressed.
+            keyboard_device: The keyboard module with key definitions.
+
+ This method should be overridden in subclasses for additional key handling.
+ """
+ try:
+ if key == keyboard_device.Key.esc:
+ self.keyboard_events["episode_end"] = True
+ return
+ if key == keyboard_device.Key.left:
+ self.keyboard_events["rerecord_episode"] = True
+ return
+ if hasattr(key, "char") and key.char == "s":
+ logging.info("Key 's' pressed. Episode success triggered.")
+ self.keyboard_events["episode_success"] = True
+ return
+ except Exception as e:
+ logging.error(f"Error handling key press: {e}")
+
+ def _init_keyboard_listener(self):
+ """
+ Initialize the keyboard listener for intervention control.
+
+ This method sets up keyboard event handling if not in headless mode.
+ """
+ from pynput import keyboard as keyboard_device
+
+ def on_press(key):
+ with self.event_lock:
+ self._handle_key_press(key, keyboard_device)
+
+ self.listener = keyboard_device.Listener(on_press=on_press)
+ self.listener.start()
+
+ def _check_intervention(self):
+ """
+ Check if human intervention is needed.
+
+ Returns:
+ Boolean indicating whether intervention is needed.
+
+ This method should be overridden in subclasses with specific intervention logic.
+ """
+ return False
+
+ def _handle_intervention(self, action):
+ """
+ Process actions during intervention mode.
+
+ Args:
+ action: The original action from the agent.
+
+ Returns:
+ Tuple of (modified_action, intervention_action).
+ """
+ if self.leader_torque_enabled:
+ self.robot_leader.bus.sync_write("Torque_Enable", 0)
+ self.leader_torque_enabled = False
+
+ leader_pos_dict = self.robot_leader.bus.sync_read("Present_Position")
+ follower_pos_dict = self.robot_follower.bus.sync_read("Present_Position")
+
+ leader_pos = np.array([leader_pos_dict[name] for name in leader_pos_dict])
+ follower_pos = np.array([follower_pos_dict[name] for name in follower_pos_dict])
+
+ self.leader_tracking_error_queue.append(np.linalg.norm(follower_pos[:-1] - leader_pos[:-1]))
+
+ # [:3, 3] Last column of the transformation matrix corresponds to the xyz translation
+ leader_ee = self.kinematics.forward_kinematics(leader_pos)[:3, 3]
+ follower_ee = self.kinematics.forward_kinematics(follower_pos)[:3, 3]
+
+ action = np.clip(leader_ee - follower_ee, -self.end_effector_step_sizes, self.end_effector_step_sizes)
+ # Normalize the action to the range [-1, 1]
+ action = action / self.end_effector_step_sizes
+
+ if self.use_gripper:
+ if self.prev_leader_gripper is None:
+ self.prev_leader_gripper = np.clip(
+ leader_pos[-1], 0, self.robot_follower.config.max_gripper_pos
+ )
+
+ # Get gripper action delta based on leader pose
+ leader_gripper = leader_pos[-1]
+ gripper_delta = leader_gripper - self.prev_leader_gripper
+
+ # Normalize by max angle and quantize to {0,1,2}
+ normalized_delta = gripper_delta / self.robot_follower.config.max_gripper_pos
+ if normalized_delta >= 0.3:
+ gripper_action = 2
+ elif normalized_delta <= 0.1:
+ gripper_action = 0
+ else:
+ gripper_action = 1
+
+ action = np.append(action, gripper_action)
+
+ return action
+
+ def _handle_leader_teleoperation(self):
+ """
+ Handle leader teleoperation in non-intervention mode.
+
+ This method synchronizes the leader robot position with the follower.
+ """
+
+ prev_leader_pos_dict = self.robot_leader.bus.sync_read("Present_Position")
+ prev_leader_pos = np.array(
+ [prev_leader_pos_dict[name] for name in prev_leader_pos_dict], dtype=np.float32
+ )
+
+ if not self.leader_torque_enabled:
+ self.robot_leader.bus.sync_write("Torque_Enable", 1)
+ self.leader_torque_enabled = True
+
+ follower_pos_dict = self.robot_follower.bus.sync_read("Present_Position")
+ follower_pos = np.array([follower_pos_dict[name] for name in follower_pos_dict], dtype=np.float32)
+
+ goal_pos = {f"{motor}": follower_pos[i] for i, motor in enumerate(self.robot_leader.bus.motors)}
+ self.robot_leader.bus.sync_write("Goal_Position", goal_pos)
+
+ self.leader_tracking_error_queue.append(np.linalg.norm(follower_pos[:-1] - prev_leader_pos[:-1]))
+
+ def step(self, action):
+ """
+ Execute a step with possible human intervention.
+
+ Args:
+ action: The action to take in the environment.
+
+ Returns:
+ Tuple of (observation, reward, terminated, truncated, info).
+ """
+ is_intervention = self._check_intervention()
+
+        # NOTE: during intervention the action comes from the leader arm instead of the policy.
+ if is_intervention:
+ action = self._handle_intervention(action)
+ else:
+ self._handle_leader_teleoperation()
+
+        # Execute the (possibly overridden) action in the wrapped environment.
+ obs, reward, terminated, truncated, info = self.env.step(action)
+
+ if isinstance(action, np.ndarray):
+ action = torch.from_numpy(action)
+
+ # Add intervention info
+ info["is_intervention"] = is_intervention
+ info["action_intervention"] = action
+
+ self.prev_leader_gripper = np.clip(
+ self.robot_leader.bus.sync_read("Present_Position")["gripper"],
+ 0,
+ self.robot_follower.config.max_gripper_pos,
+ )
+
+ # Check for success or manual termination
+ success = self.keyboard_events["episode_success"]
+ terminated = terminated or self.keyboard_events["episode_end"] or success
+
+ if success:
+ reward = 1.0
+ logging.info("Episode ended successfully with reward 1.0")
+
+ return obs, reward, terminated, truncated, info
+
+ def reset(self, **kwargs):
+ """
+ Reset the environment and intervention state.
+
+ Args:
+ **kwargs: Keyword arguments passed to the wrapped environment's reset.
+
+ Returns:
+ The initial observation and info.
+ """
+ self.keyboard_events = dict.fromkeys(self.keyboard_events, False)
+ self.leader_tracking_error_queue.clear()
+ return super().reset(**kwargs)
+
+ def close(self):
+ """
+ Clean up resources, including stopping keyboard listener.
+
+ Returns:
+ Result of closing the wrapped environment.
+ """
+ if hasattr(self, "listener") and self.listener is not None:
+ self.listener.stop()
+ return self.env.close()
+
+
+class GearedLeaderControlWrapper(BaseLeaderControlWrapper):
+ """
+ Wrapper that enables manual intervention via keyboard.
+
+ This wrapper extends the BaseLeaderControlWrapper to allow explicit toggling
+ of human intervention mode with keyboard controls.
+ """
+
+ def _init_keyboard_events(self):
+ """
+ Initialize keyboard events including human intervention flag.
+
+ Extends the base class dictionary with an additional flag for tracking
+ intervention state toggled by keyboard.
+ """
+ super()._init_keyboard_events()
+ self.keyboard_events["human_intervention_step"] = False
+
+ def _handle_key_press(self, key, keyboard_device):
+ """
+ Handle key presses including space for intervention toggle.
+
+ Args:
+ key: The key that was pressed.
+            keyboard_device: The keyboard module with key definitions.
+
+ Extends the base handler to respond to space key for toggling intervention.
+ """
+ super()._handle_key_press(key, keyboard_device)
+ if key == keyboard_device.Key.space:
+ if not self.keyboard_events["human_intervention_step"]:
+ logging.info(
+ "Space key pressed. Human intervention required.\n"
+ "Place the leader in similar pose to the follower and press space again."
+ )
+ self.keyboard_events["human_intervention_step"] = True
+ log_say("Human intervention step.", play_sounds=True)
+ else:
+ self.keyboard_events["human_intervention_step"] = False
+ logging.info("Space key pressed for a second time.\nContinuing with policy actions.")
+ log_say("Continuing with policy actions.", play_sounds=True)
+
+ def _check_intervention(self):
+ """
+ Check if human intervention is active based on keyboard toggle.
+
+ Returns:
+ Boolean indicating whether intervention mode is active.
+ """
+ return self.keyboard_events["human_intervention_step"]
+
+
+class GearedLeaderAutomaticControlWrapper(BaseLeaderControlWrapper):
+ """
+ Wrapper with automatic intervention based on error thresholds.
+
+ This wrapper monitors the error between leader and follower positions
+ and automatically triggers intervention when error exceeds thresholds.
+ """
+
+ def __init__(
+ self,
+ env,
+ teleop_device,
+ end_effector_step_sizes,
+ use_gripper=False,
+ intervention_threshold=10.0,
+ release_threshold=1e-2,
+ ):
+ """
+ Initialize the automatic intervention wrapper.
+
+ Args:
+ env: The environment to wrap.
+            teleop_device: The teleoperation device.
+            end_effector_step_sizes: Per-axis maximum end-effector step sizes (x, y, z).
+            use_gripper: Whether to include gripper control.
+            intervention_threshold: Error-variance threshold to trigger intervention.
+            release_threshold: Error-variance threshold to release intervention.
+ """
+ super().__init__(env, teleop_device, end_effector_step_sizes, use_gripper=use_gripper)
+
+ # Error tracking parameters
+ self.intervention_threshold = intervention_threshold # Threshold to trigger intervention
+ self.release_threshold = release_threshold # Threshold to release intervention
+ self.is_intervention_active = False
+ self.start_time = time.perf_counter()
+
+ def _check_intervention(self):
+ """
+        Determine whether intervention should occur based on the leader-follower tracking error.
+
+        The variance of recent tracking errors between the leader and follower positions is
+        monitored: intervention is triggered when it exceeds the intervention threshold and
+        released when it falls below the release threshold.
+
+ Returns:
+ Boolean indicating whether intervention should be active.
+ """
+
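+        # Illustrative (hypothetical) trace: with recent tracking errors [0.2, 0.3, 4.1, 11.8]
+        # filling the queue, the variance of the last two samples is ~14.8, which exceeds the
+        # default intervention_threshold of 10.0 and triggers intervention.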
+        # Condition for starting the intervention:
+        # if the teleoperation error is too high, the user has likely grasped the leader robot and wants to take over.
+ if (
+ not self.is_intervention_active
+ and len(self.leader_tracking_error_queue) == self.leader_tracking_error_queue.maxlen
+ and np.var(list(self.leader_tracking_error_queue)[-2:]) > self.intervention_threshold
+ ):
+ self.is_intervention_active = True
+ self.leader_tracking_error_queue.clear()
+ log_say("Intervention started", play_sounds=True)
+ return True
+
+ # Track the error over time in leader_tracking_error_queue
+ # If the variance of the tracking error is too low, that means the user has let go of the leader robot and the intervention is over
+ if (
+ self.is_intervention_active
+ and len(self.leader_tracking_error_queue) == self.leader_tracking_error_queue.maxlen
+ and np.var(self.leader_tracking_error_queue) < self.release_threshold
+ ):
+ self.is_intervention_active = False
+ self.leader_tracking_error_queue.clear()
+ log_say("Intervention ended", play_sounds=True)
+ return False
+
+        # If no change warranting a switch in the intervention state has occurred, return the current state.
+ return self.is_intervention_active
+
+ def reset(self, **kwargs):
+ """
+ Reset error tracking on environment reset.
+
+ Args:
+ **kwargs: Keyword arguments passed to the wrapped environment's reset.
+
+ Returns:
+ The initial observation and info.
+ """
+ self.is_intervention_active = False
+ return super().reset(**kwargs)
+
+
+class GamepadControlWrapper(gym.Wrapper):
+ """
+ Wrapper that allows controlling a gym environment with a gamepad.
+
+ This wrapper intercepts the step method and allows human input via gamepad
+ to override the agent's actions when desired.
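+
+    Illustrative composition (a sketch; the teleoperator configuration is assumed to be
+    created elsewhere):
+
+        teleop = make_teleoperator_from_config(teleop_cfg)  # e.g. a GamepadTeleop
+        env = GamepadControlWrapper(env, teleop_device=teleop, use_gripper=True)
+        obs, reward, terminated, truncated, info = env.step(policy_action)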
+ """
+
+ def __init__(
+ self,
+ env,
+ teleop_device, # Accepts an instantiated teleoperator
+ use_gripper=False, # This should align with teleop_device's config
+ auto_reset=False,
+ ):
+ """
+ Initialize the gamepad controller wrapper.
+
+ Args:
+ env: The environment to wrap.
+ teleop_device: The instantiated teleoperation device (e.g., GamepadTeleop).
+ use_gripper: Whether to include gripper control (should match teleop_device.config.use_gripper).
+ auto_reset: Whether to auto reset the environment when episode ends.
+ """
+ super().__init__(env)
+
+ self.teleop_device = teleop_device
+ # Ensure the teleop_device is connected if it has a connect method
+ if hasattr(self.teleop_device, "connect") and not self.teleop_device.is_connected:
+ self.teleop_device.connect()
+
+
+ self.auto_reset = auto_reset
+ # use_gripper from args should ideally match teleop_device.config.use_gripper
+ # For now, we use the one passed, but it can lead to inconsistency if not set correctly from config
+ self.use_gripper = use_gripper
+
+ logging.info("Gamepad control wrapper initialized with provided teleop_device.")
+ print(
+ "Gamepad controls (managed by the provided teleop_device - specific button mappings might vary):"
+ )
+ print(" Left analog stick: Move in X-Y plane")
+ print(" Right analog stick: Move in Z axis (up/down)")
+ print(" X/Square button: End episode (FAILURE)")
+ print(" Y/Triangle button: End episode (SUCCESS)")
+ print(" B/Circle button: Exit program")
+
+ def get_teleop_commands(
+ self,
+ ) -> tuple[bool, np.ndarray, bool, bool, bool]:
+ """
+ Get the current action from the gamepad if any input is active.
+
+ Returns:
+ Tuple containing:
+ - is_active: Whether gamepad input is active (from teleop_device.gamepad.should_intervene())
+ - action: The action derived from gamepad input (from teleop_device.get_action())
+ - terminate_episode: Whether episode termination was requested
+ - success: Whether episode success was signaled
+ - rerecord_episode: Whether episode rerecording was requested
+ """
+ if not hasattr(self.teleop_device, "gamepad") or self.teleop_device.gamepad is None:
+ raise AttributeError(
+ "teleop_device does not have a 'gamepad' attribute or it is None. Expected for GamepadControlWrapper."
+ )
+
+ # Get status flags from the underlying gamepad controller within the teleop_device
+ self.teleop_device.gamepad.update() # Ensure gamepad state is fresh
+ intervention_is_active = self.teleop_device.gamepad.should_intervene()
+ episode_end_status = self.teleop_device.gamepad.get_episode_end_status()
+
+ terminate_episode = episode_end_status is not None
+ success = episode_end_status == "success"
+ rerecord_episode = episode_end_status == "rerecord_episode"
+
+ # Get the action dictionary from the teleop_device
+ action_dict = self.teleop_device.get_action()
+
+ # Convert action_dict to numpy array based on expected structure
+ # Order: delta_x, delta_y, delta_z, gripper (if use_gripper)
+ action_list = [action_dict["delta_x"], action_dict["delta_y"], action_dict["delta_z"]]
+ if self.use_gripper:
+ # GamepadTeleop returns gripper action as 0 (close), 1 (stay), 2 (open)
+ # This needs to be consistent with what EEActionWrapper expects if it's used downstream
+ # EEActionWrapper for gripper typically expects 0.0 (closed) to 2.0 (open)
+ # For now, we pass the direct value from GamepadTeleop, ensure downstream compatibility.
+ gripper_val = action_dict.get("gripper", 1.0) # Default to 1.0 (stay) if not present
+ action_list.append(float(gripper_val))
+
+ gamepad_action_np = np.array(action_list, dtype=np.float32)
+
+ return (
+ intervention_is_active,
+ gamepad_action_np,
+ terminate_episode,
+ success,
+ rerecord_episode,
+ )
+
+ def step(self, action):
+ """
+ Step the environment, using gamepad input to override actions when active.
+
+ Args:
+ action: Original action from agent.
+
+ Returns:
+ Tuple of (observation, reward, terminated, truncated, info).
+ """
+ # Get gamepad state and action
+ (
+ is_intervention,
+ gamepad_action,
+ terminate_episode,
+ success,
+ rerecord_episode,
+ ) = self.get_teleop_commands()
+
+ # Update episode ending state if requested
+ if terminate_episode:
+ logging.info(f"Episode manually ended: {'SUCCESS' if success else 'FAILURE'}")
+
+ # Only override the action if gamepad is active
+ action = gamepad_action if is_intervention else action
+
+ # Step the environment
+ obs, reward, terminated, truncated, info = self.env.step(action)
+
+ # Add episode ending if requested via gamepad
+ terminated = terminated or truncated or terminate_episode
+
+ if success:
+ reward = 1.0
+ logging.info("Episode ended successfully with reward 1.0")
+
+ if isinstance(action, np.ndarray):
+ action = torch.from_numpy(action)
+
+ info["is_intervention"] = is_intervention
+        # Mirror BaseLeaderControlWrapper, which stores `action_intervention` in info:
+        # during intervention this is the gamepad action, otherwise it is the policy's action.
+ info["action_intervention"] = action
+
+ info["rerecord_episode"] = rerecord_episode
+
+ # If episode ended, reset the state
+ if terminated or truncated:
+ # Add success/failure information to info dict
+ info["next.success"] = success
+
+ # Auto reset if configured
+ if self.auto_reset:
+ obs, reset_info = self.reset()
+ info.update(reset_info)
+
+ return obs, reward, terminated, truncated, info
+
+ def close(self):
+ """
+ Clean up resources when environment closes.
+
+ Returns:
+ Result of closing the wrapped environment.
+ """
+ if hasattr(self.teleop_device, "disconnect"):
+ self.teleop_device.disconnect()
+
+ # Call the parent close method
+ return self.env.close()
+
+
+class KeyboardControlWrapper(GamepadControlWrapper):
+ """
+ Wrapper that allows controlling a gym environment with a keyboard.
+
+ This wrapper intercepts the step method and allows human input via keyboard
+ to override the agent's actions when desired.
+
+ Inherits from GamepadControlWrapper to avoid code duplication.
+ """
+
+ def __init__(
+ self,
+ env,
+ teleop_device, # Accepts an instantiated teleoperator
+ use_gripper=False, # This should align with teleop_device's config
+ auto_reset=False,
+ ):
+ """
+        Initialize the keyboard controller wrapper.
+
+ Args:
+ env: The environment to wrap.
+            teleop_device: The instantiated teleoperation device (e.g., KeyboardEndEffectorTeleop).
+ use_gripper: Whether to include gripper control (should match teleop_device.config.use_gripper).
+ auto_reset: Whether to auto reset the environment when episode ends.
+ """
+ super().__init__(env, teleop_device, use_gripper, auto_reset)
+
+ self.is_intervention_active = False
+
+ logging.info("Keyboard control wrapper initialized with provided teleop_device.")
+ print("Keyboard controls:")
+ print(" Arrow keys: Move in X-Y plane")
+ print(" Shift and Shift_R: Move in Z axis")
+ print(" Right Ctrl and Left Ctrl: Open and close gripper")
+ print(" f: End episode with FAILURE")
+ print(" s: End episode with SUCCESS")
+ print(" r: End episode with RERECORD")
+ print(" i: Start/Stop Intervention")
+
+ def get_teleop_commands(
+ self,
+ ) -> tuple[bool, np.ndarray, bool, bool, bool]:
+ action_dict = self.teleop_device.get_action()
+ episode_end_status = None
+
+ # Unroll the misc_keys_queue to check for events related to intervention, episode success, etc.
+ while not self.teleop_device.misc_keys_queue.empty():
+ key = self.teleop_device.misc_keys_queue.get()
+ if key == "i":
+ self.is_intervention_active = not self.is_intervention_active
+ elif key == "f":
+ episode_end_status = "failure"
+ elif key == "s":
+ episode_end_status = "success"
+ elif key == "r":
+ episode_end_status = "rerecord_episode"
+
+ terminate_episode = episode_end_status is not None
+ success = episode_end_status == "success"
+ rerecord_episode = episode_end_status == "rerecord_episode"
+
+ # Convert action_dict to numpy array based on expected structure
+ # Order: delta_x, delta_y, delta_z, gripper (if use_gripper)
+ action_list = [action_dict["delta_x"], action_dict["delta_y"], action_dict["delta_z"]]
+ if self.use_gripper:
+            # The teleop device returns the gripper action as 0 (close), 1 (stay) or 2 (open),
+            # like GamepadTeleop; this must stay consistent with what EEActionWrapper expects
+            # if it is used downstream (0.0 closed to 2.0 open). For now, the value is passed
+            # through directly; ensure downstream compatibility.
+ gripper_val = action_dict.get("gripper", 1.0) # Default to 1.0 (stay) if not present
+ action_list.append(float(gripper_val))
+
+ gamepad_action_np = np.array(action_list, dtype=np.float32)
+
+ return (
+ self.is_intervention_active,
+ gamepad_action_np,
+ terminate_episode,
+ success,
+ rerecord_episode,
+ )
+
+
+class GymHilDeviceWrapper(gym.Wrapper):
+ def __init__(self, env, device="cpu"):
+ super().__init__(env)
+ self.device = device
+
+ def step(self, action):
+ obs, reward, terminated, truncated, info = self.env.step(action)
+ for k in obs:
+ obs[k] = obs[k].to(self.device)
+ if "action_intervention" in info:
+ # NOTE: This is a hack to ensure the action intervention is a float32 tensor and supported on MPS device
+ info["action_intervention"] = info["action_intervention"].astype(np.float32)
+ info["action_intervention"] = torch.from_numpy(info["action_intervention"]).to(self.device)
+ return obs, reward, terminated, truncated, info
+
+ def reset(self, *, seed: int | None = None, options: dict[str, Any] | None = None):
+ obs, info = self.env.reset(seed=seed, options=options)
+ for k in obs:
+ obs[k] = obs[k].to(self.device)
+ if "action_intervention" in info:
+ # NOTE: This is a hack to ensure the action intervention is a float32 tensor and supported on MPS device
+ info["action_intervention"] = info["action_intervention"].astype(np.float32)
+ info["action_intervention"] = torch.from_numpy(info["action_intervention"]).to(self.device)
+ return obs, info
+
+
+class GymHilObservationProcessorWrapper(gym.ObservationWrapper):
+ def __init__(self, env: gym.Env):
+ super().__init__(env)
+ prev_space = self.observation_space
+ new_space = {}
+
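+        # Re-key pixel observations as "observation.images.<camera>" entries and the agent
+        # position as "observation.state"; the image shape is hard-coded here to (3, 128, 128) CHW.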
+ for key in prev_space:
+ if "pixels" in key:
+ for k in prev_space["pixels"]:
+ new_space[f"observation.images.{k}"] = gym.spaces.Box(
+ 0.0, 255.0, shape=(3, 128, 128), dtype=np.uint8
+ )
+
+ if key == "agent_pos":
+ new_space["observation.state"] = prev_space["agent_pos"]
+
+ self.observation_space = gym.spaces.Dict(new_space)
+
+ def observation(self, observation: dict[str, Any]) -> dict[str, Any]:
+ return preprocess_observation(observation)
+
+
+###########################################################
+# Factory functions
+###########################################################
+
+
+def make_robot_env(cfg: EnvConfig) -> gym.Env:
+ """
+ Factory function to create a robot environment.
+
+ This function builds a robot environment with all necessary wrappers
+ based on the provided configuration.
+
+ Args:
+ cfg: Configuration object containing environment parameters.
+
+ Returns:
+ A gym environment with all necessary wrappers applied.
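+
+    Example (illustrative; assumes a valid `EnvConfig` has been loaded elsewhere):
+
+        env = make_robot_env(cfg)
+        obs, info = env.reset()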
+ """
+ if cfg.type == "hil":
+ import gym_hil # noqa: F401
+
+ # TODO (azouitine)
+ env = gym.make(
+ f"gym_hil/{cfg.task}",
+ image_obs=True,
+ render_mode="human",
+ use_gripper=cfg.wrapper.use_gripper,
+ gripper_penalty=cfg.wrapper.gripper_penalty,
+ )
+ env = GymHilObservationProcessorWrapper(env=env)
+ env = GymHilDeviceWrapper(env=env, device=cfg.device)
+ env = BatchCompatibleWrapper(env=env)
+ env = TorchActionWrapper(env=env, device=cfg.device)
+ return env
+
+ if not hasattr(cfg, "robot") or not hasattr(cfg, "teleop"):
+ raise ValueError(
+ "Configuration for 'gym_manipulator' must be HILSerlRobotEnvConfig with robot and teleop."
+ )
+
+ if cfg.robot is None:
+ raise ValueError("RobotConfig (cfg.robot) must be provided for gym_manipulator environment.")
+ robot = make_robot_from_config(cfg.robot)
+ teleop_device = make_teleoperator_from_config(cfg.teleop)
+ teleop_device.connect()
+
+ # Create base environment
+ env = RobotEnv(
+ robot=robot,
+ use_gripper=cfg.wrapper.use_gripper,
+ display_cameras=cfg.wrapper.display_cameras if cfg.wrapper else False,
+ )
+
+ # Add observation and image processing
+ if cfg.wrapper:
+ if cfg.wrapper.add_joint_velocity_to_observation:
+ env = AddJointVelocityToObservation(env=env, fps=cfg.fps)
+ if cfg.wrapper.add_current_to_observation:
+ env = AddCurrentToObservation(env=env)
+ if cfg.wrapper.add_ee_pose_to_observation:
+ env = EEObservationWrapper(env=env, ee_pose_limits=robot.end_effector_bounds)
+
+ env = ConvertToLeRobotObservation(env=env, device=cfg.device)
+
+ if cfg.wrapper and cfg.wrapper.crop_params_dict is not None:
+ env = ImageCropResizeWrapper(
+ env=env,
+ crop_params_dict=cfg.wrapper.crop_params_dict,
+ resize_size=cfg.wrapper.resize_size,
+ )
+
+ # Add reward computation and control wrappers
+ reward_classifier = init_reward_classifier(cfg)
+ if reward_classifier is not None:
+ env = RewardWrapper(env=env, reward_classifier=reward_classifier, device=cfg.device)
+
+ env = TimeLimitWrapper(env=env, control_time_s=cfg.wrapper.control_time_s, fps=cfg.fps)
+ if cfg.wrapper.use_gripper and cfg.wrapper.gripper_penalty is not None:
+ env = GripperPenaltyWrapper(
+ env=env,
+ penalty=cfg.wrapper.gripper_penalty,
+ )
+
+ # Control mode specific wrappers
+ control_mode = cfg.wrapper.control_mode
+ if control_mode == "gamepad":
+ assert isinstance(teleop_device, GamepadTeleop), (
+ "teleop_device must be an instance of GamepadTeleop for gamepad control mode"
+ )
+ env = GamepadControlWrapper(
+ env=env,
+ teleop_device=teleop_device,
+ use_gripper=cfg.wrapper.use_gripper,
+ )
+ elif control_mode == "keyboard_ee":
+ assert isinstance(teleop_device, KeyboardEndEffectorTeleop), (
+ "teleop_device must be an instance of KeyboardEndEffectorTeleop for keyboard control mode"
+ )
+ env = KeyboardControlWrapper(
+ env=env,
+ teleop_device=teleop_device,
+ use_gripper=cfg.wrapper.use_gripper,
+ )
+ elif control_mode == "leader":
+ env = GearedLeaderControlWrapper(
+ env=env,
+ teleop_device=teleop_device,
+ end_effector_step_sizes=cfg.robot.end_effector_step_sizes,
+ use_gripper=cfg.wrapper.use_gripper,
+ )
+ elif control_mode == "leader_automatic":
+ env = GearedLeaderAutomaticControlWrapper(
+ env=env,
+ teleop_device=teleop_device,
+ end_effector_step_sizes=cfg.robot.end_effector_step_sizes,
+ use_gripper=cfg.wrapper.use_gripper,
+ )
+ else:
+ raise ValueError(f"Invalid control mode: {control_mode}")
+
+ env = ResetWrapper(
+ env=env,
+ reset_pose=cfg.wrapper.fixed_reset_joint_positions,
+ reset_time_s=cfg.wrapper.reset_time_s,
+ )
+
+ env = BatchCompatibleWrapper(env=env)
+ env = TorchActionWrapper(env=env, device=cfg.device)
+
+ return env
+
+
+def init_reward_classifier(cfg):
+ """
+ Load a reward classifier policy from a pretrained path if configured.
+
+ Args:
+ cfg: The environment configuration containing classifier paths.
+
+ Returns:
+ The loaded classifier model or None if not configured.
+ """
+ if cfg.reward_classifier_pretrained_path is None:
+ return None
+
+ from lerobot.policies.sac.reward_model.modeling_classifier import Classifier
+
+ # Get device from config or default to CUDA
+ device = getattr(cfg, "device", "cpu")
+
+ # Load the classifier directly using from_pretrained
+ classifier = Classifier.from_pretrained(
+ pretrained_name_or_path=cfg.reward_classifier_pretrained_path,
+ )
+
+ # Ensure model is on the correct device
+ classifier.to(device)
+ classifier.eval() # Set to evaluation mode
+
+ return classifier
+
+
+###########################################################
+# Record and replay functions
+###########################################################
+
+
+def record_dataset(env, policy, cfg):
+ """
+ Record a dataset of robot interactions using either a policy or teleop.
+
+ This function runs episodes in the environment and records the observations,
+ actions, and results for dataset creation.
+
+ Args:
+ env: The environment to record from.
+ policy: Optional policy to generate actions (if None, uses teleop).
+ cfg: Configuration object containing recording parameters like:
+ - repo_id: Repository ID for dataset storage
+ - dataset_root: Local root directory for dataset
+ - num_episodes: Number of episodes to record
+ - fps: Frames per second for recording
+ - push_to_hub: Whether to push dataset to Hugging Face Hub
+ - task: Name/description of the task being recorded
+ - number_of_steps_after_success: Number of additional steps to continue recording after
+ a success (reward=1) is detected. This helps collect
+ more positive examples for reward classifier training.
+ """
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+ # Setup initial action (zero action if using teleop)
+ action = env.action_space.sample() * 0.0
+
+ action_names = ["delta_x_ee", "delta_y_ee", "delta_z_ee"]
+ if cfg.wrapper.use_gripper:
+ action_names.append("gripper_delta")
+
+ # Configure dataset features based on environment spaces
+ features = {
+ "observation.state": {
+ "dtype": "float32",
+ "shape": env.observation_space["observation.state"].shape,
+ "names": None,
+ },
+ "action": {
+ "dtype": "float32",
+ "shape": (len(action_names),),
+ "names": action_names,
+ },
+ "next.reward": {"dtype": "float32", "shape": (1,), "names": None},
+ "next.done": {"dtype": "bool", "shape": (1,), "names": None},
+ "complementary_info.discrete_penalty": {
+ "dtype": "float32",
+ "shape": (1,),
+ "names": ["discrete_penalty"],
+ },
+ }
+
+ # Add image features
+ for key in env.observation_space:
+ if "image" in key:
+ features[key] = {
+ "dtype": "video",
+ "shape": env.observation_space[key].shape,
+ "names": ["channels", "height", "width"],
+ }
+
+ # Create dataset
+ dataset = LeRobotDataset.create(
+ cfg.repo_id,
+ cfg.fps,
+ root=cfg.dataset_root,
+ use_videos=True,
+ image_writer_threads=4,
+ image_writer_processes=0,
+ features=features,
+ )
+
+ # Record episodes
+ episode_index = 0
+ recorded_action = None
+ while episode_index < cfg.num_episodes:
+ obs, _ = env.reset()
+ start_episode_t = time.perf_counter()
+ log_say(f"Recording episode {episode_index}", play_sounds=True)
+
+ # Track success state collection
+ success_detected = False
+ success_steps_collected = 0
+
+ # Run episode steps
+ while time.perf_counter() - start_episode_t < cfg.wrapper.control_time_s:
+ start_loop_t = time.perf_counter()
+
+ # Get action from policy if available
+ if cfg.pretrained_policy_name_or_path is not None:
+ action = policy.select_action(obs)
+
+ # Step environment
+ obs, reward, terminated, truncated, info = env.step(action)
+
+ # Check if episode needs to be rerecorded
+ if info.get("rerecord_episode", False):
+ break
+
+ # For teleop, get action from intervention
+ recorded_action = {
+ "action": info["action_intervention"].cpu().squeeze(0).float() if policy is None else action
+ }
+
+ # Process observation for dataset
+ obs_processed = {k: v.cpu().squeeze(0).float() for k, v in obs.items()}
+
+ # Check if we've just detected success
+ if reward == 1.0 and not success_detected:
+ success_detected = True
+ logging.info("Success detected! Collecting additional success states.")
+
+ # Add frame to dataset - continue marking as success even during extra collection steps
+ frame = {**obs_processed, **recorded_action}
+
+ # If we're in the success collection phase, keep marking rewards as 1.0
+ if success_detected:
+ frame["next.reward"] = np.array([1.0], dtype=np.float32)
+ else:
+ frame["next.reward"] = np.array([reward], dtype=np.float32)
+
+ # Only mark as done if we're truly done (reached end or collected enough success states)
+ really_done = terminated or truncated
+ if success_detected:
+ success_steps_collected += 1
+ really_done = success_steps_collected >= cfg.number_of_steps_after_success
+
+ frame["next.done"] = np.array([really_done], dtype=bool)
+ frame["complementary_info.discrete_penalty"] = torch.tensor(
+ [info.get("discrete_penalty", 0.0)], dtype=torch.float32
+ )
+ dataset.add_frame(frame, task=cfg.task)
+
+ # Maintain consistent timing
+ if cfg.fps:
+ dt_s = time.perf_counter() - start_loop_t
+ busy_wait(1 / cfg.fps - dt_s)
+
+ # Check if we should end the episode
+ if (terminated or truncated) and not success_detected:
+ # Regular termination without success
+ break
+ elif success_detected and success_steps_collected >= cfg.number_of_steps_after_success:
+ # We've collected enough success states
+ logging.info(f"Collected {success_steps_collected} additional success states")
+ break
+
+ # Handle episode recording
+ if info.get("rerecord_episode", False):
+ dataset.clear_episode_buffer()
+ logging.info(f"Re-recording episode {episode_index}")
+ continue
+
+ dataset.save_episode()
+ episode_index += 1
+
+ # Finalize dataset
+ # dataset.consolidate(run_compute_stats=True)
+ if cfg.push_to_hub:
+ dataset.push_to_hub()
+
+
+def replay_episode(env, cfg):
+ """
+ Replay a recorded episode in the environment.
+
+ This function loads actions from a previously recorded episode
+ and executes them in the environment.
+
+ Args:
+ env: The environment to replay in.
+ cfg: Configuration object containing replay parameters:
+ - repo_id: Repository ID for dataset
+ - dataset_root: Local root directory for dataset
+ - episode: Episode ID to replay
+ """
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+ dataset = LeRobotDataset(cfg.repo_id, root=cfg.dataset_root, episodes=[cfg.episode])
+ env.reset()
+
+ actions = dataset.hf_dataset.select_columns("action")
+
+ for idx in range(dataset.num_frames):
+ start_episode_t = time.perf_counter()
+
+ action = actions[idx]["action"]
+ env.step(action)
+
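+        # Pace the replay at a fixed 10 FPS (1 / 10 s per frame below).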
+ dt_s = time.perf_counter() - start_episode_t
+ busy_wait(1 / 10 - dt_s)
+
+
+@parser.wrap()
+def main(cfg: EnvConfig):
+ """Main entry point for the robot environment script.
+
+ This function runs the robot environment in one of several modes
+ based on the provided configuration.
+
+ Args:
+ cfg: Configuration object defining the run parameters,
+ including mode (record, replay, random) and other settings.
+ """
+ env = make_robot_env(cfg)
+
+ if cfg.mode == "record":
+ policy = None
+ if cfg.pretrained_policy_name_or_path is not None:
+ from lerobot.policies.sac.modeling_sac import SACPolicy
+
+ policy = SACPolicy.from_pretrained(cfg.pretrained_policy_name_or_path)
+ policy.to(cfg.device)
+ policy.eval()
+
+ record_dataset(
+ env,
+ policy=policy,
+ cfg=cfg,
+ )
+ exit()
+
+ if cfg.mode == "replay":
+ replay_episode(
+ env,
+ cfg=cfg,
+ )
+ exit()
+
+ env.reset()
+
+    # Initialize the smoothed action as a zero action.
+ smoothed_action = env.action_space.sample() * 0.0
+
+ # Smoothing coefficient (alpha) defines how much of the new random sample to mix in.
+ # A value close to 0 makes the trajectory very smooth (slow to change), while a value close to 1 is less smooth.
+ alpha = 1.0
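+    # With alpha = 1.0 the moving average reduces to using each new random sample directly;
+    # a smaller value (e.g. 0.2) would yield smoother random exploration trajectories.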
+
+ num_episode = 0
+ successes = []
+ while num_episode < 10:
+ start_loop_s = time.perf_counter()
+ # Sample a new random action from the robot's action space.
+ new_random_action = env.action_space.sample()
+ # Update the smoothed action using an exponential moving average.
+ smoothed_action = alpha * new_random_action + (1 - alpha) * smoothed_action
+
+ # Execute the step: wrap the NumPy action in a torch tensor.
+ obs, reward, terminated, truncated, info = env.step(smoothed_action)
+ if terminated or truncated:
+ successes.append(reward)
+ env.reset()
+ num_episode += 1
+
+ dt_s = time.perf_counter() - start_loop_s
+ busy_wait(1 / cfg.fps - dt_s)
+
+    logging.info(f"Episode outcomes over {num_episode} random episodes: {successes}")
+ logging.info(f"success rate {sum(successes) / len(successes)}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/lerobot/scripts/rl/learner.py b/src/lerobot/scripts/rl/learner.py
new file mode 100644
index 0000000000..f9f3901ce7
--- /dev/null
+++ b/src/lerobot/scripts/rl/learner.py
@@ -0,0 +1,1215 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Learner server runner for distributed HILSerl robot policy training.
+
+This script implements the learner component of the distributed HILSerl architecture.
+It initializes the policy network, maintains replay buffers, and updates
+the policy based on transitions received from the actor server.
+
+Examples of usage:
+
+- Start a learner server for training:
+```bash
+python -m lerobot.scripts.rl.learner --config_path src/lerobot/configs/train_config_hilserl_so100.json
+```
+
+**NOTE**: Start the learner server before launching the actor server. The learner opens a gRPC server
+to communicate with actors.
+
+**NOTE**: Training progress can be monitored through Weights & Biases if wandb.enable is set to true
+in your configuration.
+
+**WORKFLOW**:
+1. Create training configuration with proper policy, dataset, and environment settings
+2. Start this learner server with the configuration
+3. Start an actor server with the same configuration
+4. Monitor training progress through wandb dashboard
+
+For more details on the complete HILSerl training workflow, see:
+https://github.com/michel-aractingi/lerobot-hilserl-guide
+"""
+
+import logging
+import os
+import shutil
+import time
+from concurrent.futures import ThreadPoolExecutor
+from pathlib import Path
+from pprint import pformat
+
+import grpc
+import torch
+from termcolor import colored
+from torch import nn
+from torch.multiprocessing import Queue
+from torch.optim.optimizer import Optimizer
+
+from lerobot.cameras import opencv # noqa: F401
+from lerobot.configs import parser
+from lerobot.configs.train import TrainRLServerPipelineConfig
+from lerobot.constants import (
+ CHECKPOINTS_DIR,
+ LAST_CHECKPOINT_LINK,
+ PRETRAINED_MODEL_DIR,
+ TRAINING_STATE_DIR,
+)
+from lerobot.datasets.factory import make_dataset
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.policies.factory import make_policy
+from lerobot.policies.sac.modeling_sac import SACPolicy
+from lerobot.robots import so100_follower # noqa: F401
+from lerobot.scripts.rl import learner_service
+from lerobot.teleoperators import gamepad, so101_leader # noqa: F401
+from lerobot.transport import services_pb2_grpc
+from lerobot.transport.utils import (
+ MAX_MESSAGE_SIZE,
+ bytes_to_python_object,
+ bytes_to_transitions,
+ state_to_bytes,
+)
+from lerobot.utils.buffer import ReplayBuffer, concatenate_batch_transitions
+from lerobot.utils.process import ProcessSignalHandler
+from lerobot.utils.random_utils import set_seed
+from lerobot.utils.train_utils import (
+ get_step_checkpoint_dir,
+ load_training_state as utils_load_training_state,
+ save_checkpoint,
+ update_last_checkpoint,
+)
+from lerobot.utils.transition import move_state_dict_to_device, move_transition_to_device
+from lerobot.utils.utils import (
+ format_big_number,
+ get_safe_torch_device,
+ init_logging,
+)
+from lerobot.utils.wandb_utils import WandBLogger
+
+LOG_PREFIX = "[LEARNER]"
+
+
+#################################################
+# MAIN ENTRY POINTS AND CORE ALGORITHM FUNCTIONS #
+#################################################
+
+
+@parser.wrap()
+def train_cli(cfg: TrainRLServerPipelineConfig):
+ if not use_threads(cfg):
+ import torch.multiprocessing as mp
+
+ mp.set_start_method("spawn")
+
+ # Use the job_name from the config
+ train(
+ cfg,
+ job_name=cfg.job_name,
+ )
+
+ logging.info("[LEARNER] train_cli finished")
+
+
+def train(cfg: TrainRLServerPipelineConfig, job_name: str | None = None):
+ """
+ Main training function that initializes and runs the training process.
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): The training configuration
+ job_name (str | None, optional): Job name for logging. Defaults to None.
+ """
+
+ cfg.validate()
+
+ if job_name is None:
+ job_name = cfg.job_name
+
+ if job_name is None:
+ raise ValueError("Job name must be specified either in config or as a parameter")
+
+ display_pid = False
+ if not use_threads(cfg):
+ display_pid = True
+
+ # Create logs directory to ensure it exists
+ log_dir = os.path.join(cfg.output_dir, "logs")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, f"learner_{job_name}.log")
+
+ # Initialize logging with explicit log file
+ init_logging(log_file=log_file, display_pid=display_pid)
+ logging.info(f"Learner logging initialized, writing to {log_file}")
+ logging.info(pformat(cfg.to_dict()))
+
+ # Setup WandB logging if enabled
+ if cfg.wandb.enable and cfg.wandb.project:
+ from lerobot.utils.wandb_utils import WandBLogger
+
+ wandb_logger = WandBLogger(cfg)
+ else:
+ wandb_logger = None
+ logging.info(colored("Logs will be saved locally.", "yellow", attrs=["bold"]))
+
+ # Handle resume logic
+ cfg = handle_resume_logic(cfg)
+
+ set_seed(seed=cfg.seed)
+
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+
+ is_threaded = use_threads(cfg)
+ shutdown_event = ProcessSignalHandler(is_threaded, display_pid=display_pid).shutdown_event
+
+ start_learner_threads(
+ cfg=cfg,
+ wandb_logger=wandb_logger,
+ shutdown_event=shutdown_event,
+ )
+
+
+def start_learner_threads(
+ cfg: TrainRLServerPipelineConfig,
+ wandb_logger: WandBLogger | None,
+ shutdown_event: any, # Event,
+) -> None:
+ """
+ Start the learner threads for training.
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): Training configuration
+ wandb_logger (WandBLogger | None): Logger for metrics
+ shutdown_event: Event to signal shutdown
+ """
+ # Create multiprocessing queues
+ transition_queue = Queue()
+ interaction_message_queue = Queue()
+ parameters_queue = Queue()
+
+ concurrency_entity = None
+
+ if use_threads(cfg):
+ from threading import Thread
+
+ concurrency_entity = Thread
+ else:
+ from torch.multiprocessing import Process
+
+ concurrency_entity = Process
+
+ communication_process = concurrency_entity(
+ target=start_learner,
+ args=(
+ parameters_queue,
+ transition_queue,
+ interaction_message_queue,
+ shutdown_event,
+ cfg,
+ ),
+ daemon=True,
+ )
+ communication_process.start()
+
+ add_actor_information_and_train(
+ cfg=cfg,
+ wandb_logger=wandb_logger,
+ shutdown_event=shutdown_event,
+ transition_queue=transition_queue,
+ interaction_message_queue=interaction_message_queue,
+ parameters_queue=parameters_queue,
+ )
+ logging.info("[LEARNER] Training process stopped")
+
+ logging.info("[LEARNER] Closing queues")
+ transition_queue.close()
+ interaction_message_queue.close()
+ parameters_queue.close()
+
+ communication_process.join()
+ logging.info("[LEARNER] Communication process joined")
+
+ logging.info("[LEARNER] join queues")
+ transition_queue.cancel_join_thread()
+ interaction_message_queue.cancel_join_thread()
+ parameters_queue.cancel_join_thread()
+
+ logging.info("[LEARNER] queues closed")
+
+
+#################################################
+# Core algorithm functions #
+#################################################
+
+
+def add_actor_information_and_train(
+ cfg: TrainRLServerPipelineConfig,
+ wandb_logger: WandBLogger | None,
+ shutdown_event: any, # Event,
+ transition_queue: Queue,
+ interaction_message_queue: Queue,
+ parameters_queue: Queue,
+):
+ """
+ Handles data transfer from the actor to the learner, manages training updates,
+ and logs training progress in an online reinforcement learning setup.
+
+ This function continuously:
+ - Transfers transitions from the actor to the replay buffer.
+ - Logs received interaction messages.
+ - Ensures training begins only when the replay buffer has a sufficient number of transitions.
+ - Samples batches from the replay buffer and performs multiple critic updates.
+ - Periodically updates the actor, critic, and temperature optimizers.
+ - Logs training statistics, including loss values and optimization frequency.
+
+    NOTE: This function doesn't have a single responsibility and should be split into multiple
+    functions in the future. Because of Python's GIL, splitting the work across threads slowed
+    performance dramatically (roughly 200x), so a single thread does all of the work here.
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): Configuration object containing hyperparameters.
+ wandb_logger (WandBLogger | None): Logger for tracking training progress.
+ shutdown_event (Event): Event to signal shutdown.
+ transition_queue (Queue): Queue for receiving transitions from the actor.
+ interaction_message_queue (Queue): Queue for receiving interaction messages from the actor.
+ parameters_queue (Queue): Queue for sending policy parameters to the actor.
+ """
+    # Extract all configuration variables up front; this improves speed by roughly 7%.
+ device = get_safe_torch_device(try_device=cfg.policy.device, log=True)
+ storage_device = get_safe_torch_device(try_device=cfg.policy.storage_device)
+ clip_grad_norm_value = cfg.policy.grad_clip_norm
+ online_step_before_learning = cfg.policy.online_step_before_learning
+ utd_ratio = cfg.policy.utd_ratio
+ fps = cfg.env.fps
+ log_freq = cfg.log_freq
+ save_freq = cfg.save_freq
+ policy_update_freq = cfg.policy.policy_update_freq
+ policy_parameters_push_frequency = cfg.policy.actor_learner_config.policy_parameters_push_frequency
+ saving_checkpoint = cfg.save_checkpoint
+ online_steps = cfg.policy.online_steps
+ async_prefetch = cfg.policy.async_prefetch
+
+ # Initialize logging for multiprocessing
+ if not use_threads(cfg):
+ log_dir = os.path.join(cfg.output_dir, "logs")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, f"learner_train_process_{os.getpid()}.log")
+ init_logging(log_file=log_file, display_pid=True)
+ logging.info("Initialized logging for actor information and training process")
+
+ logging.info("Initializing policy")
+
+ policy: SACPolicy = make_policy(
+ cfg=cfg.policy,
+ env_cfg=cfg.env,
+ )
+
+ assert isinstance(policy, nn.Module)
+
+ policy.train()
+
+ push_actor_policy_to_queue(parameters_queue=parameters_queue, policy=policy)
+
+ last_time_policy_pushed = time.time()
+
+ optimizers, lr_scheduler = make_optimizers_and_scheduler(cfg=cfg, policy=policy)
+
+ # If we are resuming, we need to load the training state
+ resume_optimization_step, resume_interaction_step = load_training_state(cfg=cfg, optimizers=optimizers)
+
+ log_training_info(cfg=cfg, policy=policy)
+
+ replay_buffer = initialize_replay_buffer(cfg, device, storage_device)
+ batch_size = cfg.batch_size
+ offline_replay_buffer = None
+
+ if cfg.dataset is not None:
+ offline_replay_buffer = initialize_offline_replay_buffer(
+ cfg=cfg,
+ device=device,
+ storage_device=storage_device,
+ )
+        batch_size: int = batch_size // 2  # We will sample from both replay buffers
+
+ logging.info("Starting learner thread")
+ interaction_message = None
+ optimization_step = resume_optimization_step if resume_optimization_step is not None else 0
+ interaction_step_shift = resume_interaction_step if resume_interaction_step is not None else 0
+
+ dataset_repo_id = None
+ if cfg.dataset is not None:
+ dataset_repo_id = cfg.dataset.repo_id
+
+ # Initialize iterators
+ online_iterator = None
+ offline_iterator = None
+
+ # NOTE: THIS IS THE MAIN LOOP OF THE LEARNER
+ while True:
+ # Exit the training loop if shutdown is requested
+ if shutdown_event is not None and shutdown_event.is_set():
+ logging.info("[LEARNER] Shutdown signal received. Exiting...")
+ break
+
+        # Process all available transitions into the replay buffer, sent by the actor server
+ process_transitions(
+ transition_queue=transition_queue,
+ replay_buffer=replay_buffer,
+ offline_replay_buffer=offline_replay_buffer,
+ device=device,
+ dataset_repo_id=dataset_repo_id,
+ shutdown_event=shutdown_event,
+ )
+
+ # Process all available interaction messages sent by the actor server
+ interaction_message = process_interaction_messages(
+ interaction_message_queue=interaction_message_queue,
+ interaction_step_shift=interaction_step_shift,
+ wandb_logger=wandb_logger,
+ shutdown_event=shutdown_event,
+ )
+
+ # Wait until the replay buffer has enough samples to start training
+ if len(replay_buffer) < online_step_before_learning:
+ continue
+
+ if online_iterator is None:
+ online_iterator = replay_buffer.get_iterator(
+ batch_size=batch_size, async_prefetch=async_prefetch, queue_size=2
+ )
+
+ if offline_replay_buffer is not None and offline_iterator is None:
+ offline_iterator = offline_replay_buffer.get_iterator(
+ batch_size=batch_size, async_prefetch=async_prefetch, queue_size=2
+ )
+
+ time_for_one_optimization_step = time.time()
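+        # Perform `utd_ratio` critic updates per learner iteration: the first (utd_ratio - 1)
+        # passes below update only the critics; the final pass after this loop also refreshes
+        # the actor and temperature at the configured policy_update_freq.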
+ for _ in range(utd_ratio - 1):
+ # Sample from the iterators
+ batch = next(online_iterator)
+
+ if dataset_repo_id is not None:
+ batch_offline = next(offline_iterator)
+ batch = concatenate_batch_transitions(
+ left_batch_transitions=batch, right_batch_transition=batch_offline
+ )
+
+ actions = batch["action"]
+ rewards = batch["reward"]
+ observations = batch["state"]
+ next_observations = batch["next_state"]
+ done = batch["done"]
+ check_nan_in_transition(observations=observations, actions=actions, next_state=next_observations)
+
+ observation_features, next_observation_features = get_observation_features(
+ policy=policy, observations=observations, next_observations=next_observations
+ )
+
+ # Create a batch dictionary with all required elements for the forward method
+ forward_batch = {
+ "action": actions,
+ "reward": rewards,
+ "state": observations,
+ "next_state": next_observations,
+ "done": done,
+ "observation_feature": observation_features,
+ "next_observation_feature": next_observation_features,
+ "complementary_info": batch["complementary_info"],
+ }
+
+ # Use the forward method for critic loss
+ critic_output = policy.forward(forward_batch, model="critic")
+
+ # Main critic optimization
+ loss_critic = critic_output["loss_critic"]
+ optimizers["critic"].zero_grad()
+ loss_critic.backward()
+ critic_grad_norm = torch.nn.utils.clip_grad_norm_(
+ parameters=policy.critic_ensemble.parameters(), max_norm=clip_grad_norm_value
+ )
+ optimizers["critic"].step()
+
+ # Discrete critic optimization (if available)
+ if policy.config.num_discrete_actions is not None:
+ discrete_critic_output = policy.forward(forward_batch, model="discrete_critic")
+ loss_discrete_critic = discrete_critic_output["loss_discrete_critic"]
+ optimizers["discrete_critic"].zero_grad()
+ loss_discrete_critic.backward()
+ discrete_critic_grad_norm = torch.nn.utils.clip_grad_norm_(
+ parameters=policy.discrete_critic.parameters(), max_norm=clip_grad_norm_value
+ )
+ optimizers["discrete_critic"].step()
+
+ # Update target networks (main and discrete)
+ policy.update_target_networks()
+
+ # Sample for the last update in the UTD ratio
+ batch = next(online_iterator)
+
+ if dataset_repo_id is not None:
+ batch_offline = next(offline_iterator)
+ batch = concatenate_batch_transitions(
+ left_batch_transitions=batch, right_batch_transition=batch_offline
+ )
+
+ actions = batch["action"]
+ rewards = batch["reward"]
+ observations = batch["state"]
+ next_observations = batch["next_state"]
+ done = batch["done"]
+
+ check_nan_in_transition(observations=observations, actions=actions, next_state=next_observations)
+
+ observation_features, next_observation_features = get_observation_features(
+ policy=policy, observations=observations, next_observations=next_observations
+ )
+
+ # Create a batch dictionary with all required elements for the forward method
+ forward_batch = {
+ "action": actions,
+ "reward": rewards,
+ "state": observations,
+ "next_state": next_observations,
+ "done": done,
+ "observation_feature": observation_features,
+ "next_observation_feature": next_observation_features,
+ }
+
+ critic_output = policy.forward(forward_batch, model="critic")
+
+ loss_critic = critic_output["loss_critic"]
+ optimizers["critic"].zero_grad()
+ loss_critic.backward()
+ critic_grad_norm = torch.nn.utils.clip_grad_norm_(
+ parameters=policy.critic_ensemble.parameters(), max_norm=clip_grad_norm_value
+ ).item()
+ optimizers["critic"].step()
+
+ # Initialize training info dictionary
+ training_infos = {
+ "loss_critic": loss_critic.item(),
+ "critic_grad_norm": critic_grad_norm,
+ }
+
+ # Discrete critic optimization (if available)
+ if policy.config.num_discrete_actions is not None:
+ discrete_critic_output = policy.forward(forward_batch, model="discrete_critic")
+ loss_discrete_critic = discrete_critic_output["loss_discrete_critic"]
+ optimizers["discrete_critic"].zero_grad()
+ loss_discrete_critic.backward()
+ discrete_critic_grad_norm = torch.nn.utils.clip_grad_norm_(
+ parameters=policy.discrete_critic.parameters(), max_norm=clip_grad_norm_value
+ ).item()
+ optimizers["discrete_critic"].step()
+
+ # Add discrete critic info to training info
+ training_infos["loss_discrete_critic"] = loss_discrete_critic.item()
+ training_infos["discrete_critic_grad_norm"] = discrete_critic_grad_norm
+
+ # Actor and temperature optimization (at specified frequency)
+ if optimization_step % policy_update_freq == 0:
+ for _ in range(policy_update_freq):
+ # Actor optimization
+ actor_output = policy.forward(forward_batch, model="actor")
+ loss_actor = actor_output["loss_actor"]
+ optimizers["actor"].zero_grad()
+ loss_actor.backward()
+ actor_grad_norm = torch.nn.utils.clip_grad_norm_(
+ parameters=policy.actor.parameters(), max_norm=clip_grad_norm_value
+ ).item()
+ optimizers["actor"].step()
+
+ # Add actor info to training info
+ training_infos["loss_actor"] = loss_actor.item()
+ training_infos["actor_grad_norm"] = actor_grad_norm
+
+ # Temperature optimization
+ temperature_output = policy.forward(forward_batch, model="temperature")
+ loss_temperature = temperature_output["loss_temperature"]
+ optimizers["temperature"].zero_grad()
+ loss_temperature.backward()
+ temp_grad_norm = torch.nn.utils.clip_grad_norm_(
+ parameters=[policy.log_alpha], max_norm=clip_grad_norm_value
+ ).item()
+ optimizers["temperature"].step()
+
+ # Add temperature info to training info
+ training_infos["loss_temperature"] = loss_temperature.item()
+ training_infos["temperature_grad_norm"] = temp_grad_norm
+ training_infos["temperature"] = policy.temperature
+
+ # Update temperature
+ policy.update_temperature()
+
+ # Push policy to actors if needed
+ if time.time() - last_time_policy_pushed > policy_parameters_push_frequency:
+ push_actor_policy_to_queue(parameters_queue=parameters_queue, policy=policy)
+ last_time_policy_pushed = time.time()
+
+ # Update target networks (main and discrete)
+ policy.update_target_networks()
+
+ # Log training metrics at specified intervals
+ if optimization_step % log_freq == 0:
+ training_infos["replay_buffer_size"] = len(replay_buffer)
+ if offline_replay_buffer is not None:
+ training_infos["offline_replay_buffer_size"] = len(offline_replay_buffer)
+ training_infos["Optimization step"] = optimization_step
+
+ # Log training metrics
+ if wandb_logger:
+ wandb_logger.log_dict(d=training_infos, mode="train", custom_step_key="Optimization step")
+
+ # Calculate and log optimization frequency
+ time_for_one_optimization_step = time.time() - time_for_one_optimization_step
+ frequency_for_one_optimization_step = 1 / (time_for_one_optimization_step + 1e-9)
+
+ logging.info(f"[LEARNER] Optimization frequency loop [Hz]: {frequency_for_one_optimization_step}")
+
+ # Log optimization frequency
+ if wandb_logger:
+ wandb_logger.log_dict(
+ {
+ "Optimization frequency loop [Hz]": frequency_for_one_optimization_step,
+ "Optimization step": optimization_step,
+ },
+ mode="train",
+ custom_step_key="Optimization step",
+ )
+
+ optimization_step += 1
+ if optimization_step % log_freq == 0:
+ logging.info(f"[LEARNER] Number of optimization step: {optimization_step}")
+
+ # Save checkpoint at specified intervals
+ if saving_checkpoint and (optimization_step % save_freq == 0 or optimization_step == online_steps):
+ save_training_checkpoint(
+ cfg=cfg,
+ optimization_step=optimization_step,
+ online_steps=online_steps,
+ interaction_message=interaction_message,
+ policy=policy,
+ optimizers=optimizers,
+ replay_buffer=replay_buffer,
+ offline_replay_buffer=offline_replay_buffer,
+ dataset_repo_id=dataset_repo_id,
+ fps=fps,
+ )
+
+
+def start_learner(
+ parameters_queue: Queue,
+ transition_queue: Queue,
+ interaction_message_queue: Queue,
+ shutdown_event: any, # Event,
+ cfg: TrainRLServerPipelineConfig,
+):
+ """
+ Start the learner server for training.
+ It will receive transitions and interaction messages from the actor server,
+ and send policy parameters to the actor server.
+
+ Args:
+ parameters_queue: Queue for sending policy parameters to the actor
+ transition_queue: Queue for receiving transitions from the actor
+ interaction_message_queue: Queue for receiving interaction messages from the actor
+ shutdown_event: Event to signal shutdown
+ cfg: Training configuration
+ """
+ if not use_threads(cfg):
+ # Create a process-specific log file
+ log_dir = os.path.join(cfg.output_dir, "logs")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, f"learner_process_{os.getpid()}.log")
+
+ # Initialize logging with explicit log file
+ init_logging(log_file=log_file, display_pid=True)
+ logging.info("Learner server process logging initialized")
+
+ # Setup process handlers to handle shutdown signal
+ # But use shutdown event from the main process
+ # Return back for MP
+ # TODO: Check if it's useful
+ _ = ProcessSignalHandler(False, display_pid=True)
+
+ service = learner_service.LearnerService(
+ shutdown_event=shutdown_event,
+ parameters_queue=parameters_queue,
+ seconds_between_pushes=cfg.policy.actor_learner_config.policy_parameters_push_frequency,
+ transition_queue=transition_queue,
+ interaction_message_queue=interaction_message_queue,
+ queue_get_timeout=cfg.policy.actor_learner_config.queue_get_timeout,
+ )
+
+ server = grpc.server(
+ ThreadPoolExecutor(max_workers=learner_service.MAX_WORKERS),
+ options=[
+ ("grpc.max_receive_message_length", MAX_MESSAGE_SIZE),
+ ("grpc.max_send_message_length", MAX_MESSAGE_SIZE),
+ ],
+ )
+
+ services_pb2_grpc.add_LearnerServiceServicer_to_server(
+ service,
+ server,
+ )
+
+ host = cfg.policy.actor_learner_config.learner_host
+ port = cfg.policy.actor_learner_config.learner_port
+
+ server.add_insecure_port(f"{host}:{port}")
+ server.start()
+ logging.info("[LEARNER] gRPC server started")
+
+ shutdown_event.wait()
+ logging.info("[LEARNER] Stopping gRPC server...")
+ server.stop(learner_service.SHUTDOWN_TIMEOUT)
+ logging.info("[LEARNER] gRPC server stopped")
+
+
+def save_training_checkpoint(
+ cfg: TrainRLServerPipelineConfig,
+ optimization_step: int,
+ online_steps: int,
+ interaction_message: dict | None,
+ policy: nn.Module,
+ optimizers: dict[str, Optimizer],
+ replay_buffer: ReplayBuffer,
+ offline_replay_buffer: ReplayBuffer | None = None,
+ dataset_repo_id: str | None = None,
+ fps: int = 30,
+) -> None:
+ """
+ Save training checkpoint and associated data.
+
+ This function performs the following steps:
+ 1. Creates a checkpoint directory with the current optimization step
+ 2. Saves the policy model, configuration, and optimizer states
+ 3. Saves the current interaction step for resuming training
+ 4. Updates the "last" checkpoint symlink to point to this checkpoint
+ 5. Saves the replay buffer as a dataset for later use
+ 6. If an offline replay buffer exists, saves it as a separate dataset
+
+ Args:
+ cfg: Training configuration
+ optimization_step: Current optimization step
+ online_steps: Total number of online steps
+ interaction_message: Dictionary containing interaction information
+ policy: Policy model to save
+ optimizers: Dictionary of optimizers
+ replay_buffer: Replay buffer to save as dataset
+ offline_replay_buffer: Optional offline replay buffer to save
+ dataset_repo_id: Repository ID for dataset
+ fps: Frames per second for dataset
+ """
+ logging.info(f"Checkpoint policy after step {optimization_step}")
+ _num_digits = max(6, len(str(online_steps)))
+ interaction_step = interaction_message["Interaction step"] if interaction_message is not None else 0
+
+ # Create checkpoint directory
+ checkpoint_dir = get_step_checkpoint_dir(cfg.output_dir, online_steps, optimization_step)
+
+ # Save checkpoint
+ save_checkpoint(
+ checkpoint_dir=checkpoint_dir,
+ step=optimization_step,
+ cfg=cfg,
+ policy=policy,
+ optimizer=optimizers,
+ scheduler=None,
+ )
+
+ # Save interaction step manually
+ training_state_dir = os.path.join(checkpoint_dir, TRAINING_STATE_DIR)
+ os.makedirs(training_state_dir, exist_ok=True)
+ training_state = {"step": optimization_step, "interaction_step": interaction_step}
+ torch.save(training_state, os.path.join(training_state_dir, "training_state.pt"))
+
+ # Update the "last" symlink
+ update_last_checkpoint(checkpoint_dir)
+
+ # TODO: temporarily save the replay buffer here; remove later when running on the robot
+ # We want to control this with keyboard inputs
+ dataset_dir = os.path.join(cfg.output_dir, "dataset")
+ if os.path.exists(dataset_dir) and os.path.isdir(dataset_dir):
+ shutil.rmtree(dataset_dir)
+
+ # Save dataset
+ # NOTE: Handle the case where the dataset repo id is not specified in the config
+ # e.g. RL training without demonstration data
+ repo_id_buffer_save = cfg.env.task if dataset_repo_id is None else dataset_repo_id
+ replay_buffer.to_lerobot_dataset(repo_id=repo_id_buffer_save, fps=fps, root=dataset_dir)
+
+ if offline_replay_buffer is not None:
+ dataset_offline_dir = os.path.join(cfg.output_dir, "dataset_offline")
+ if os.path.exists(dataset_offline_dir) and os.path.isdir(dataset_offline_dir):
+ shutil.rmtree(dataset_offline_dir)
+
+ offline_replay_buffer.to_lerobot_dataset(
+ cfg.dataset.repo_id,
+ fps=fps,
+ root=dataset_offline_dir,
+ )
+
+ logging.info("Resume training")
+
+
+def make_optimizers_and_scheduler(cfg: TrainRLServerPipelineConfig, policy: nn.Module):
+ """
+ Creates and returns optimizers for the actor, critic, and temperature components of a reinforcement learning policy.
+
+ This function sets up Adam optimizers for:
+ - The **actor network**, ensuring that only relevant parameters are optimized.
+ - The **critic ensemble**, which evaluates the value function.
+ - The **temperature parameter**, which controls the entropy in soft actor-critic (SAC)-like methods.
+
+ It also initializes a learning rate scheduler, though it is currently set to `None`.
+
+ NOTE:
+ - If the encoder is shared, its parameters are excluded from the actor's optimization process.
+ - The policy's log temperature (`log_alpha`) is wrapped in a list to ensure proper optimization as a standalone tensor.
+
+ Args:
+ cfg: Configuration object containing hyperparameters.
+ policy (nn.Module): The policy model containing the actor, critic, and temperature components.
+
+ Returns:
+ Tuple[Dict[str, torch.optim.Optimizer], Optional[torch.optim.lr_scheduler._LRScheduler]]:
+ A tuple containing:
+ - `optimizers`: A dictionary mapping component names ("actor", "critic", "temperature") to their respective Adam optimizers.
+ - `lr_scheduler`: Currently set to `None` but can be extended to support learning rate scheduling.
+
+ """
+ optimizer_actor = torch.optim.Adam(
+ params=[
+ p
+ for n, p in policy.actor.named_parameters()
+ if not policy.config.shared_encoder or not n.startswith("encoder")
+ ],
+ lr=cfg.policy.actor_lr,
+ )
+ optimizer_critic = torch.optim.Adam(params=policy.critic_ensemble.parameters(), lr=cfg.policy.critic_lr)
+
+ if cfg.policy.num_discrete_actions is not None:
+ optimizer_discrete_critic = torch.optim.Adam(
+ params=policy.discrete_critic.parameters(), lr=cfg.policy.critic_lr
+ )
+ optimizer_temperature = torch.optim.Adam(params=[policy.log_alpha], lr=cfg.policy.critic_lr)
+ lr_scheduler = None
+ optimizers = {
+ "actor": optimizer_actor,
+ "critic": optimizer_critic,
+ "temperature": optimizer_temperature,
+ }
+ if cfg.policy.num_discrete_actions is not None:
+ optimizers["discrete_critic"] = optimizer_discrete_critic
+ return optimizers, lr_scheduler
+
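+# NOTE: Illustrative sketch only (not called by the training loop): one way the currently
+# unused `lr_scheduler` slot returned above could be filled, e.g. with a cosine schedule
+# on the critic optimizer. The scheduler choice and the 100k-step horizon are arbitrary assumptions.
+def _example_attach_lr_scheduler(cfg: TrainRLServerPipelineConfig, policy: nn.Module):
+    optimizers, _ = make_optimizers_and_scheduler(cfg=cfg, policy=policy)
+    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizers["critic"], T_max=100_000)
+    # After each critic update one would call: optimizers["critic"].step(); lr_scheduler.step()
+    return optimizers, lr_scheduler
+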
+
+#################################################
+# Training setup functions #
+#################################################
+
+
+def handle_resume_logic(cfg: TrainRLServerPipelineConfig) -> TrainRLServerPipelineConfig:
+ """
+ Handle the resume logic for training.
+
+ If resume is True:
+ - Verifies that a checkpoint exists
+ - Loads the checkpoint configuration
+ - Logs resumption details
+ - Returns the checkpoint configuration
+
+ If resume is False:
+ - Checks if an output directory exists (to prevent accidental overwriting)
+ - Returns the original configuration
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): The training configuration
+
+ Returns:
+ TrainRLServerPipelineConfig: The updated configuration
+
+ Raises:
+ RuntimeError: If resume is True but no checkpoint found, or if resume is False but directory exists
+ """
+ out_dir = cfg.output_dir
+
+ # Case 1: Not resuming, but need to check if directory exists to prevent overwrites
+ if not cfg.resume:
+ checkpoint_dir = os.path.join(out_dir, CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK)
+ if os.path.exists(checkpoint_dir):
+ raise RuntimeError(
+ f"Output directory {checkpoint_dir} already exists. Use `resume=true` to resume training."
+ )
+ return cfg
+
+ # Case 2: Resuming training
+ checkpoint_dir = os.path.join(out_dir, CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK)
+ if not os.path.exists(checkpoint_dir):
+ raise RuntimeError(f"No model checkpoint found in {checkpoint_dir} for resume=True")
+
+ # Log that we found a valid checkpoint and are resuming
+ logging.info(
+ colored(
+ "Valid checkpoint found: resume=True detected, resuming previous run",
+ color="yellow",
+ attrs=["bold"],
+ )
+ )
+
+ # Load config using Draccus
+ checkpoint_cfg_path = os.path.join(checkpoint_dir, PRETRAINED_MODEL_DIR, "train_config.json")
+ checkpoint_cfg = TrainRLServerPipelineConfig.from_pretrained(checkpoint_cfg_path)
+
+ # Ensure resume flag is set in returned config
+ checkpoint_cfg.resume = True
+ return checkpoint_cfg
+
+
+def load_training_state(
+ cfg: TrainRLServerPipelineConfig,
+ optimizers: Optimizer | dict[str, Optimizer],
+):
+ """
+ Loads the training state (optimizers, step count, etc.) from a checkpoint.
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): Training configuration
+ optimizers (Optimizer | dict): Optimizers to load state into
+
+ Returns:
+ tuple: (optimization_step, interaction_step) or (None, None) if not resuming
+ """
+ if not cfg.resume:
+ return None, None
+
+ # Construct path to the last checkpoint directory
+ checkpoint_dir = os.path.join(cfg.output_dir, CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK)
+
+ logging.info(f"Loading training state from {checkpoint_dir}")
+
+ try:
+ # Use the utility function from train_utils which loads the optimizer state
+ step, optimizers, _ = utils_load_training_state(Path(checkpoint_dir), optimizers, None)
+
+ # Load interaction step separately from training_state.pt
+ training_state_path = os.path.join(checkpoint_dir, TRAINING_STATE_DIR, "training_state.pt")
+ interaction_step = 0
+ if os.path.exists(training_state_path):
+ training_state = torch.load(training_state_path, weights_only=False) # nosec B614: Safe usage of torch.load
+ interaction_step = training_state.get("interaction_step", 0)
+
+ logging.info(f"Resuming from step {step}, interaction step {interaction_step}")
+ return step, interaction_step
+
+ except Exception as e:
+ logging.error(f"Failed to load training state: {e}")
+ return None, None
+
+
+def log_training_info(cfg: TrainRLServerPipelineConfig, policy: nn.Module) -> None:
+ """
+ Log information about the training process.
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): Training configuration
+ policy (nn.Module): Policy model
+ """
+ num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad)
+ num_total_params = sum(p.numel() for p in policy.parameters())
+
+ logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}")
+ logging.info(f"{cfg.env.task=}")
+ logging.info(f"{cfg.policy.online_steps=}")
+ logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})")
+ logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})")
+
+
+def initialize_replay_buffer(
+ cfg: TrainRLServerPipelineConfig, device: str, storage_device: str
+) -> ReplayBuffer:
+ """
+ Initialize a replay buffer, either empty or from a dataset if resuming.
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): Training configuration
+ device (str): Device to store tensors on
+ storage_device (str): Device for storage optimization
+
+ Returns:
+ ReplayBuffer: Initialized replay buffer
+ """
+ if not cfg.resume:
+ return ReplayBuffer(
+ capacity=cfg.policy.online_buffer_capacity,
+ device=device,
+ state_keys=cfg.policy.input_features.keys(),
+ storage_device=storage_device,
+ optimize_memory=True,
+ )
+
+ logging.info("Resume training load the online dataset")
+ dataset_path = os.path.join(cfg.output_dir, "dataset")
+
+ # NOTE: In RL it is possible to not have a dataset.
+ repo_id = None
+ if cfg.dataset is not None:
+ repo_id = cfg.dataset.repo_id
+ dataset = LeRobotDataset(
+ repo_id=repo_id,
+ root=dataset_path,
+ )
+ return ReplayBuffer.from_lerobot_dataset(
+ lerobot_dataset=dataset,
+ capacity=cfg.policy.online_buffer_capacity,
+ device=device,
+ state_keys=cfg.policy.input_features.keys(),
+ optimize_memory=True,
+ )
+
+
+def initialize_offline_replay_buffer(
+ cfg: TrainRLServerPipelineConfig,
+ device: str,
+ storage_device: str,
+) -> ReplayBuffer:
+ """
+ Initialize an offline replay buffer from a dataset.
+
+ Args:
+ cfg (TrainRLServerPipelineConfig): Training configuration
+ device (str): Device to store tensors on
+ storage_device (str): Device for storage optimization
+
+ Returns:
+ ReplayBuffer: Initialized offline replay buffer
+ """
+ if not cfg.resume:
+ logging.info("make_dataset offline buffer")
+ offline_dataset = make_dataset(cfg)
+ else:
+ logging.info("load offline dataset")
+ dataset_offline_path = os.path.join(cfg.output_dir, "dataset_offline")
+ offline_dataset = LeRobotDataset(
+ repo_id=cfg.dataset.repo_id,
+ root=dataset_offline_path,
+ )
+
+ logging.info("Convert to a offline replay buffer")
+ offline_replay_buffer = ReplayBuffer.from_lerobot_dataset(
+ offline_dataset,
+ device=device,
+ state_keys=cfg.policy.input_features.keys(),
+ storage_device=storage_device,
+ optimize_memory=True,
+ capacity=cfg.policy.offline_buffer_capacity,
+ )
+ return offline_replay_buffer
+
+
+#################################################
+# Utilities/Helpers functions #
+#################################################
+
+
+def get_observation_features(
+ policy: SACPolicy, observations: torch.Tensor, next_observations: torch.Tensor
+) -> tuple[torch.Tensor | None, torch.Tensor | None]:
+ """
+ Get observation features from the policy encoder. It acts as a cache for the observation features:
+ when the encoder is frozen, the observation features are not updated,
+ so we can save compute by caching them.
+
+ Args:
+ policy: The policy model
+ observations: The current observations
+ next_observations: The next observations
+
+ Returns:
+ tuple: observation_features, next_observation_features
+ """
+
+ if policy.config.vision_encoder_name is None or not policy.config.freeze_vision_encoder:
+ return None, None
+
+ with torch.no_grad():
+ observation_features = policy.actor.encoder.get_cached_image_features(observations, normalize=True)
+ next_observation_features = policy.actor.encoder.get_cached_image_features(
+ next_observations, normalize=True
+ )
+
+ return observation_features, next_observation_features
+
+
+def use_threads(cfg: TrainRLServerPipelineConfig) -> bool:
+ return cfg.policy.concurrency.learner == "threads"
+
+
+def check_nan_in_transition(
+ observations: dict[str, torch.Tensor],
+ actions: torch.Tensor,
+ next_state: dict[str, torch.Tensor],
+ raise_error: bool = False,
+) -> bool:
+ """
+ Check for NaN values in transition data.
+
+ Args:
+ observations: Dictionary of observation tensors
+ actions: Action tensor
+ next_state: Dictionary of next state tensors
+ raise_error: If True, raises ValueError when NaN is detected
+
+ Returns:
+ bool: True if NaN values were detected, False otherwise
+ """
+ nan_detected = False
+
+ # Check observations
+ for key, tensor in observations.items():
+ if torch.isnan(tensor).any():
+ logging.error(f"observations[{key}] contains NaN values")
+ nan_detected = True
+ if raise_error:
+ raise ValueError(f"NaN detected in observations[{key}]")
+
+ # Check next state
+ for key, tensor in next_state.items():
+ if torch.isnan(tensor).any():
+ logging.error(f"next_state[{key}] contains NaN values")
+ nan_detected = True
+ if raise_error:
+ raise ValueError(f"NaN detected in next_state[{key}]")
+
+ # Check actions
+ if torch.isnan(actions).any():
+ logging.error("actions contains NaN values")
+ nan_detected = True
+ if raise_error:
+ raise ValueError("NaN detected in actions")
+
+ return nan_detected
+
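+# NOTE: Illustrative sketch only (not used by the learner): the NaN guard above on a
+# made-up transition, with dict-valued observation/next_state as stored in the replay buffer.
+def _example_check_nan_in_transition() -> bool:
+    observations = {"observation.state": torch.tensor([[0.0, 1.0]])}
+    next_state = {"observation.state": torch.tensor([[0.5, float("nan")]])}
+    actions = torch.tensor([[0.1, 0.2]])
+    # Returns True (and logs an error) because next_state contains a NaN
+    return check_nan_in_transition(observations=observations, actions=actions, next_state=next_state)
+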
+
+def push_actor_policy_to_queue(parameters_queue: Queue, policy: nn.Module):
+ logging.debug("[LEARNER] Pushing actor policy to the queue")
+
+ # Create a dictionary to hold all the state dicts
+ state_dicts = {"policy": move_state_dict_to_device(policy.actor.state_dict(), device="cpu")}
+
+ # Add discrete critic if it exists
+ if hasattr(policy, "discrete_critic") and policy.discrete_critic is not None:
+ state_dicts["discrete_critic"] = move_state_dict_to_device(
+ policy.discrete_critic.state_dict(), device="cpu"
+ )
+ logging.debug("[LEARNER] Including discrete critic in state dict push")
+
+ state_bytes = state_to_bytes(state_dicts)
+ parameters_queue.put(state_bytes)
+
+
+def process_interaction_message(
+ message, interaction_step_shift: int, wandb_logger: WandBLogger | None = None
+):
+ """Process a single interaction message with consistent handling."""
+ message = bytes_to_python_object(message)
+ # Shift interaction step for consistency with checkpointed state
+ message["Interaction step"] += interaction_step_shift
+
+ # Log if logger available
+ if wandb_logger:
+ wandb_logger.log_dict(d=message, mode="train", custom_step_key="Interaction step")
+
+ return message
+
+
+def process_transitions(
+ transition_queue: Queue,
+ replay_buffer: ReplayBuffer,
+ offline_replay_buffer: ReplayBuffer,
+ device: str,
+ dataset_repo_id: str | None,
+ shutdown_event: any,
+):
+ """Process all available transitions from the queue.
+
+ Args:
+ transition_queue: Queue for receiving transitions from the actor
+ replay_buffer: Replay buffer to add transitions to
+ offline_replay_buffer: Offline replay buffer to add transitions to
+ device: Device to move transitions to
+ dataset_repo_id: Repository ID for dataset
+ shutdown_event: Event to signal shutdown
+ """
+ while not transition_queue.empty() and not shutdown_event.is_set():
+ transition_list = transition_queue.get()
+ transition_list = bytes_to_transitions(buffer=transition_list)
+
+ for transition in transition_list:
+ transition = move_transition_to_device(transition=transition, device=device)
+
+ # Skip transitions with NaN values
+ if check_nan_in_transition(
+ observations=transition["state"],
+ actions=transition["action"],
+ next_state=transition["next_state"],
+ ):
+ logging.warning("[LEARNER] NaN detected in transition, skipping")
+ continue
+
+ replay_buffer.add(**transition)
+
+ # Add to offline buffer if it's an intervention
+ if dataset_repo_id is not None and transition.get("complementary_info", {}).get(
+ "is_intervention"
+ ):
+ offline_replay_buffer.add(**transition)
+
+
+def process_interaction_messages(
+ interaction_message_queue: Queue,
+ interaction_step_shift: int,
+ wandb_logger: WandBLogger | None,
+ shutdown_event: any,
+) -> dict | None:
+ """Process all available interaction messages from the queue.
+
+ Args:
+ interaction_message_queue: Queue for receiving interaction messages
+ interaction_step_shift: Amount to shift interaction step by
+ wandb_logger: Logger for tracking progress
+ shutdown_event: Event to signal shutdown
+
+ Returns:
+ dict | None: The last interaction message processed, or None if none were processed
+ """
+ last_message = None
+ while not interaction_message_queue.empty() and not shutdown_event.is_set():
+ message = interaction_message_queue.get()
+ last_message = process_interaction_message(
+ message=message,
+ interaction_step_shift=interaction_step_shift,
+ wandb_logger=wandb_logger,
+ )
+
+ return last_message
+
+
+if __name__ == "__main__":
+ train_cli()
+ logging.info("[LEARNER] main finished")
diff --git a/src/lerobot/scripts/rl/learner_service.py b/src/lerobot/scripts/rl/learner_service.py
new file mode 100644
index 0000000000..b07c296e6e
--- /dev/null
+++ b/src/lerobot/scripts/rl/learner_service.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from multiprocessing import Event, Queue
+
+from lerobot.transport import services_pb2, services_pb2_grpc
+from lerobot.transport.utils import receive_bytes_in_chunks, send_bytes_in_chunks
+from lerobot.utils.queue import get_last_item_from_queue
+
+MAX_WORKERS = 3 # Stream parameters, send transitions and interactions
+SHUTDOWN_TIMEOUT = 10
+
+
+class LearnerService(services_pb2_grpc.LearnerServiceServicer):
+ """
+ Implementation of the LearnerService gRPC service.
+ This service is used to send parameters to the Actor and to receive transitions and interactions from the Actor.
+ See transport.proto for the gRPC service definition.
+ """
+
+ def __init__(
+ self,
+ shutdown_event: Event, # type: ignore
+ parameters_queue: Queue,
+ seconds_between_pushes: float,
+ transition_queue: Queue,
+ interaction_message_queue: Queue,
+ queue_get_timeout: float = 0.001,
+ ):
+ self.shutdown_event = shutdown_event
+ self.parameters_queue = parameters_queue
+ self.seconds_between_pushes = seconds_between_pushes
+ self.transition_queue = transition_queue
+ self.interaction_message_queue = interaction_message_queue
+ self.queue_get_timeout = queue_get_timeout
+
+ def StreamParameters(self, request, context): # noqa: N802
+ # TODO: authorize the request
+ logging.info("[LEARNER] Received request to stream parameters from the Actor")
+
+ last_push_time = 0
+
+ while not self.shutdown_event.is_set():
+ time_since_last_push = time.time() - last_push_time
+ if time_since_last_push < self.seconds_between_pushes:
+ self.shutdown_event.wait(self.seconds_between_pushes - time_since_last_push)
+ # Continue, because we could receive a shutdown event,
+ # and it's checked in the while loop
+ continue
+
+ logging.info("[LEARNER] Push parameters to the Actor")
+ buffer = get_last_item_from_queue(
+ self.parameters_queue, block=True, timeout=self.queue_get_timeout
+ )
+
+ if buffer is None:
+ continue
+
+ yield from send_bytes_in_chunks(
+ buffer,
+ services_pb2.Parameters,
+ log_prefix="[LEARNER] Sending parameters",
+ silent=True,
+ )
+
+ last_push_time = time.time()
+ logging.info("[LEARNER] Parameters sent")
+
+ logging.info("[LEARNER] Stream parameters finished")
+ return services_pb2.Empty()
+
+ def SendTransitions(self, request_iterator, _context): # noqa: N802
+ # TODO: authorize the request
+ logging.info("[LEARNER] Received request to receive transitions from the Actor")
+
+ receive_bytes_in_chunks(
+ request_iterator,
+ self.transition_queue,
+ self.shutdown_event,
+ log_prefix="[LEARNER] transitions",
+ )
+
+ logging.debug("[LEARNER] Finished receiving transitions")
+ return services_pb2.Empty()
+
+ def SendInteractions(self, request_iterator, _context): # noqa: N802
+ # TODO: authorize the request
+ logging.info("[LEARNER] Received request to receive interactions from the Actor")
+
+ receive_bytes_in_chunks(
+ request_iterator,
+ self.interaction_message_queue,
+ self.shutdown_event,
+ log_prefix="[LEARNER] interactions",
+ )
+
+ logging.debug("[LEARNER] Finished receiving interactions")
+ return services_pb2.Empty()
+
+ def Ready(self, request, context): # noqa: N802
+ return services_pb2.Empty()
diff --git a/src/lerobot/scripts/server/configs.py b/src/lerobot/scripts/server/configs.py
new file mode 100644
index 0000000000..5be46485ea
--- /dev/null
+++ b/src/lerobot/scripts/server/configs.py
@@ -0,0 +1,197 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections.abc import Callable
+from dataclasses import dataclass, field
+
+import torch
+
+from lerobot.robots.config import RobotConfig
+from lerobot.scripts.server.constants import (
+ DEFAULT_FPS,
+ DEFAULT_INFERENCE_LATENCY,
+ DEFAULT_OBS_QUEUE_TIMEOUT,
+)
+
+# Aggregate function registry for CLI usage
+AGGREGATE_FUNCTIONS = {
+ "weighted_average": lambda old, new: 0.3 * old + 0.7 * new,
+ "latest_only": lambda old, new: new,
+ "average": lambda old, new: 0.5 * old + 0.5 * new,
+ "conservative": lambda old, new: 0.7 * old + 0.3 * new,
+}
+
+
+def get_aggregate_function(name: str) -> Callable[[torch.Tensor, torch.Tensor], torch.Tensor]:
+ """Get aggregate function by name from registry."""
+ if name not in AGGREGATE_FUNCTIONS:
+ available = list(AGGREGATE_FUNCTIONS.keys())
+ raise ValueError(f"Unknown aggregate function '{name}'. Available: {available}")
+ return AGGREGATE_FUNCTIONS[name]
+
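+# NOTE: Illustrative sketch only (not used elsewhere in this module): the aggregate
+# functions blend an action already queued on the client with a newer prediction for
+# the same timestep. The tensors below are made-up values.
+def _example_aggregate_overlapping_actions() -> torch.Tensor:
+    queued_action = torch.tensor([0.10, 0.20, 0.30])
+    fresh_action = torch.tensor([0.50, 0.60, 0.70])
+    aggregate_fn = get_aggregate_function("weighted_average")
+    # 0.3 * queued + 0.7 * fresh -> tensor([0.38, 0.48, 0.58])
+    return aggregate_fn(queued_action, fresh_action)
+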
+
+@dataclass
+class PolicyServerConfig:
+ """Configuration for PolicyServer.
+
+ This class defines all configurable parameters for the PolicyServer,
+ including networking settings and action chunking specifications.
+ """
+
+ # Networking configuration
+ host: str = field(default="localhost", metadata={"help": "Host address to bind the server to"})
+ port: int = field(default=8080, metadata={"help": "Port number to bind the server to"})
+
+ # Timing configuration
+ fps: int = field(default=DEFAULT_FPS, metadata={"help": "Frames per second"})
+ inference_latency: float = field(
+ default=DEFAULT_INFERENCE_LATENCY, metadata={"help": "Target inference latency in seconds"}
+ )
+
+ obs_queue_timeout: float = field(
+ default=DEFAULT_OBS_QUEUE_TIMEOUT, metadata={"help": "Timeout for observation queue in seconds"}
+ )
+
+ def __post_init__(self):
+ """Validate configuration after initialization."""
+ if self.port < 1 or self.port > 65535:
+ raise ValueError(f"Port must be between 1 and 65535, got {self.port}")
+
+ if self.environment_dt <= 0:
+ raise ValueError(f"environment_dt must be positive, got {self.environment_dt}")
+
+ if self.inference_latency < 0:
+ raise ValueError(f"inference_latency must be non-negative, got {self.inference_latency}")
+
+ if self.obs_queue_timeout < 0:
+ raise ValueError(f"obs_queue_timeout must be non-negative, got {self.obs_queue_timeout}")
+
+ @classmethod
+ def from_dict(cls, config_dict: dict) -> "PolicyServerConfig":
+ """Create a PolicyServerConfig from a dictionary."""
+ return cls(**config_dict)
+
+ @property
+ def environment_dt(self) -> float:
+ """Environment time step, in seconds"""
+ return 1 / self.fps
+
+ def to_dict(self) -> dict:
+ """Convert the configuration to a dictionary."""
+ return {
+ "host": self.host,
+ "port": self.port,
+ "fps": self.fps,
+ "environment_dt": self.environment_dt,
+ "inference_latency": self.inference_latency,
+ }
+
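+# NOTE: Illustrative sketch only: building a server config from a plain dict and reading
+# the derived environment time step. The values below are arbitrary examples.
+def _example_policy_server_config() -> PolicyServerConfig:
+    config = PolicyServerConfig.from_dict({"host": "127.0.0.1", "port": 8080, "fps": 30})
+    assert abs(config.environment_dt - 1 / 30) < 1e-9  # fps=30 -> dt of ~33 ms
+    return config
+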
+
+@dataclass
+class RobotClientConfig:
+ """Configuration for RobotClient.
+
+ This class defines all configurable parameters for the RobotClient,
+ including network connection, policy settings, and control behavior.
+ """
+
+ # Policy configuration
+ policy_type: str = field(metadata={"help": "Type of policy to use"})
+ pretrained_name_or_path: str = field(metadata={"help": "Pretrained model name or path"})
+
+ # Robot configuration (for CLI usage - robot instance will be created from this)
+ robot: RobotConfig = field(metadata={"help": "Robot configuration"})
+
+ # Policies typically output at most K actions, but we can use fewer to avoid wasting bandwidth (as actions
+ # would be aggregated on the client side anyway, depending on the value of `chunk_size_threshold`)
+ actions_per_chunk: int = field(metadata={"help": "Number of actions per chunk"})
+
+ # Task instruction for the robot to execute (e.g., 'fold my tshirt')
+ task: str = field(default="", metadata={"help": "Task instruction for the robot to execute"})
+
+ # Network configuration
+ server_address: str = field(default="localhost:8080", metadata={"help": "Server address to connect to"})
+
+ # Device configuration
+ policy_device: str = field(default="cpu", metadata={"help": "Device for policy inference"})
+
+ # Control behavior configuration
+ chunk_size_threshold: float = field(default=0.5, metadata={"help": "Threshold for chunk size control"})
+ fps: int = field(default=DEFAULT_FPS, metadata={"help": "Frames per second"})
+
+ # Aggregate function configuration (CLI-compatible)
+ aggregate_fn_name: str = field(
+ default="weighted_average",
+ metadata={"help": f"Name of aggregate function to use. Options: {list(AGGREGATE_FUNCTIONS.keys())}"},
+ )
+
+ # Debug configuration
+ debug_visualize_queue_size: bool = field(
+ default=False, metadata={"help": "Visualize the action queue size"}
+ )
+
+ # Verification configuration
+ verify_robot_cameras: bool = field(
+ default=True, metadata={"help": "Verify that the robot cameras match the policy cameras"}
+ )
+
+ @property
+ def environment_dt(self) -> float:
+ """Environment time step, in seconds"""
+ return 1 / self.fps
+
+ def __post_init__(self):
+ """Validate configuration after initialization."""
+ if not self.server_address:
+ raise ValueError("server_address cannot be empty")
+
+ if not self.policy_type:
+ raise ValueError("policy_type cannot be empty")
+
+ if not self.pretrained_name_or_path:
+ raise ValueError("pretrained_name_or_path cannot be empty")
+
+ if not self.policy_device:
+ raise ValueError("policy_device cannot be empty")
+
+ if self.chunk_size_threshold < 0 or self.chunk_size_threshold > 1:
+ raise ValueError(f"chunk_size_threshold must be between 0 and 1, got {self.chunk_size_threshold}")
+
+ if self.fps <= 0:
+ raise ValueError(f"fps must be positive, got {self.fps}")
+
+ if self.actions_per_chunk <= 0:
+ raise ValueError(f"actions_per_chunk must be positive, got {self.actions_per_chunk}")
+
+ self.aggregate_fn = get_aggregate_function(self.aggregate_fn_name)
+
+ @classmethod
+ def from_dict(cls, config_dict: dict) -> "RobotClientConfig":
+ """Create a RobotClientConfig from a dictionary."""
+ return cls(**config_dict)
+
+ def to_dict(self) -> dict:
+ """Convert the configuration to a dictionary."""
+ return {
+ "server_address": self.server_address,
+ "policy_type": self.policy_type,
+ "pretrained_name_or_path": self.pretrained_name_or_path,
+ "policy_device": self.policy_device,
+ "chunk_size_threshold": self.chunk_size_threshold,
+ "fps": self.fps,
+ "actions_per_chunk": self.actions_per_chunk,
+ "task": self.task,
+ "debug_visualize_queue_size": self.debug_visualize_queue_size,
+ "aggregate_fn_name": self.aggregate_fn_name,
+ }
diff --git a/src/lerobot/scripts/server/constants.py b/src/lerobot/scripts/server/constants.py
new file mode 100644
index 0000000000..af983a8005
--- /dev/null
+++ b/src/lerobot/scripts/server/constants.py
@@ -0,0 +1,29 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client side: The environment evolves with a time resolution equal to 1/fps"""
+
+DEFAULT_FPS = 30
+
+"""Server side: Running inference on (at most) 1/fps"""
+DEFAULT_INFERENCE_LATENCY = 1 / DEFAULT_FPS
+
+"""Server side: Timeout for observation queue in seconds"""
+DEFAULT_OBS_QUEUE_TIMEOUT = 2
+
+# All action chunking policies
+SUPPORTED_POLICIES = ["act", "smolvla", "diffusion", "pi0", "tdmpc", "vqbet"]
+
+# TODO: Add all other robots
+SUPPORTED_ROBOTS = ["so100_follower", "so101_follower"]
diff --git a/src/lerobot/scripts/server/helpers.py b/src/lerobot/scripts/server/helpers.py
new file mode 100644
index 0000000000..7fd56e6933
--- /dev/null
+++ b/src/lerobot/scripts/server/helpers.py
@@ -0,0 +1,386 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import logging
+import logging.handlers
+import os
+import time
+from dataclasses import dataclass
+from pathlib import Path
+from threading import Event
+from typing import Any
+
+import torch
+
+from lerobot.configs.types import PolicyFeature
+from lerobot.constants import OBS_IMAGES, OBS_STATE
+from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features
+
+# NOTE: Configs need to be loaded for the client to be able to instantiate the policy config
+from lerobot.policies import ACTConfig, DiffusionConfig, PI0Config, SmolVLAConfig, VQBeTConfig # noqa: F401
+from lerobot.robots.robot import Robot
+from lerobot.transport import async_inference_pb2
+from lerobot.transport.utils import bytes_buffer_size
+from lerobot.utils.utils import init_logging
+
+Action = torch.Tensor
+ActionChunk = torch.Tensor
+
+# observation as received from the robot
+RawObservation = dict[str, torch.Tensor]
+
+# observations as recorded in a LeRobot dataset (keys are different)
+LeRobotObservation = dict[str, torch.Tensor]
+
+# observation, ready for policy inference (image keys resized)
+Observation = dict[str, torch.Tensor]
+
+
+def visualize_action_queue_size(action_queue_size: list[int]) -> None:
+ import matplotlib.pyplot as plt
+
+ fig, ax = plt.subplots()
+ ax.set_title("Action Queue Size Over Time")
+ ax.set_xlabel("Environment steps")
+ ax.set_ylabel("Action Queue Size")
+ ax.set_ylim(0, max(action_queue_size) * 1.1)
+ ax.grid(True, alpha=0.3)
+ ax.plot(range(len(action_queue_size)), action_queue_size)
+ plt.show()
+
+
+def validate_robot_cameras_for_policy(
+ lerobot_observation_features: dict[str, dict], policy_image_features: dict[str, PolicyFeature]
+) -> None:
+ image_keys = list(filter(is_image_key, lerobot_observation_features))
+ assert set(image_keys) == set(policy_image_features.keys()), (
+ f"Policy image features must match robot cameras! Received {list(policy_image_features.keys())} != {image_keys}"
+ )
+
+
+def map_robot_keys_to_lerobot_features(robot: Robot) -> dict[str, dict]:
+ return hw_to_dataset_features(robot.observation_features, "observation", use_video=False)
+
+
+def is_image_key(k: str) -> bool:
+ return k.startswith(OBS_IMAGES)
+
+
+def resize_robot_observation_image(image: torch.Tensor, resize_dims: tuple[int, int, int]) -> torch.Tensor:
+    assert image.ndim == 3, f"Image must be (H, W, C)! Received {image.shape}"
+    # (H, W, C) -> (C, H, W) for resizing from robot observation resolution to policy image resolution
+ image = image.permute(2, 0, 1)
+ dims = (resize_dims[1], resize_dims[2])
+ # Add batch dimension for interpolate: (C, H, W) -> (1, C, H, W)
+ image_batched = image.unsqueeze(0)
+ # Interpolate and remove batch dimension: (1, C, H, W) -> (C, H, W)
+ resized = torch.nn.functional.interpolate(image_batched, size=dims, mode="bilinear", align_corners=False)
+
+ return resized.squeeze(0)
+
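+# NOTE: Illustrative sketch only (not called by the client/server code): resizing a raw
+# camera frame to the resolution a policy expects. The shapes below are made-up examples.
+def _example_resize_robot_observation_image() -> torch.Tensor:
+    raw_frame = torch.zeros(480, 640, 3)  # (H, W, C) frame as it comes from the robot
+    resized = resize_robot_observation_image(raw_frame, resize_dims=(3, 224, 224))
+    assert resized.shape == (3, 224, 224)  # (C, H, W), ready to be batched for the policy
+    return resized
+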
+
+def raw_observation_to_observation(
+ raw_observation: RawObservation,
+ lerobot_features: dict[str, dict],
+ policy_image_features: dict[str, PolicyFeature],
+ device: str,
+) -> Observation:
+ observation = {}
+
+ observation = prepare_raw_observation(raw_observation, lerobot_features, policy_image_features)
+ for k, v in observation.items():
+ if isinstance(v, torch.Tensor): # VLAs present natural-language instructions in observations
+ if "image" in k:
+ # Policy expects images in shape (B, C, H, W)
+ observation[k] = prepare_image(v).unsqueeze(0).to(device)
+ else:
+ observation[k] = v.to(device)
+ else:
+ observation[k] = v
+
+ return observation
+
+
+def prepare_image(image: torch.Tensor) -> torch.Tensor:
+ """Minimal preprocessing to turn int8 images to float32 in [0, 1], and create a memory-contiguous tensor"""
+ image = image.type(torch.float32) / 255
+ image = image.contiguous()
+
+ return image
+
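+# NOTE: Illustrative sketch only: converting a uint8 camera image into the float32 [0, 1]
+# tensor policies expect. The shape below is a made-up example.
+def _example_prepare_image() -> torch.Tensor:
+    uint8_image = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
+    float_image = prepare_image(uint8_image)
+    assert float_image.dtype == torch.float32 and float_image.max() <= 1.0
+    return float_image
+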
+
+def extract_state_from_raw_observation(
+ lerobot_obs: RawObservation,
+) -> torch.Tensor:
+ """Extract the state from a raw observation."""
+ state = torch.tensor(lerobot_obs[OBS_STATE])
+
+ if state.ndim == 1:
+ state = state.unsqueeze(0)
+
+ return state
+
+
+def extract_images_from_raw_observation(
+ lerobot_obs: RawObservation,
+ camera_key: str,
+) -> torch.Tensor:
+ """Extract the image for a given camera key from a raw observation."""
+ return torch.tensor(lerobot_obs[camera_key])
+
+
+def make_lerobot_observation(
+ robot_obs: RawObservation,
+ lerobot_features: dict[str, dict],
+) -> LeRobotObservation:
+ """Make a lerobot observation from a raw observation."""
+ return build_dataset_frame(lerobot_features, robot_obs, prefix="observation")
+
+
+def prepare_raw_observation(
+ robot_obs: RawObservation,
+ lerobot_features: dict[str, dict],
+ policy_image_features: dict[str, PolicyFeature],
+) -> Observation:
+ """Matches keys from the raw robot_obs dict to the keys expected by a given policy (passed as
+ policy_image_features)."""
+ # 1. {motor.pos1:value1, motor.pos2:value2, ..., laptop:np.ndarray} ->
+ # -> {observation.state:[value1,value2,...], observation.images.laptop:np.ndarray}
+ lerobot_obs = make_lerobot_observation(robot_obs, lerobot_features)
+
+ # 2. Greps all observation.images.<> keys
+ image_keys = list(filter(is_image_key, lerobot_obs))
+ # state's shape is expected as (B, state_dim)
+ state_dict = {OBS_STATE: extract_state_from_raw_observation(lerobot_obs)}
+ # Turns the image features to (C, H, W) with H, W matching the policy image features.
+ # This reduces the resolution of the images
+ image_dict = {
+ image_k: resize_robot_observation_image(
+ extract_images_from_raw_observation(lerobot_obs, image_k), policy_image_features[image_k].shape
+ )
+ for image_k in image_keys
+ }
+
+ if "task" in robot_obs:
+ state_dict["task"] = robot_obs["task"]
+
+ return {**state_dict, **image_dict}
+
+
+def get_logger(name: str, log_to_file: bool = True) -> logging.Logger:
+ """
+ Get a logger using the standardized logging setup from utils.py.
+
+ Args:
+ name: Logger name (e.g., 'policy_server', 'robot_client')
+ log_to_file: Whether to also log to a file
+
+ Returns:
+ Configured logger instance
+ """
+ # Create logs directory if logging to file
+ if log_to_file:
+ os.makedirs("logs", exist_ok=True)
+ log_file = Path(f"logs/{name}_{int(time.time())}.log")
+ else:
+ log_file = None
+
+ # Initialize the standardized logging
+ init_logging(log_file=log_file, display_pid=False)
+
+ # Return a named logger
+ return logging.getLogger(name)
+
+
+@dataclass
+class TimedData:
+ """A data object with timestamp and timestep information.
+
+ Args:
+ timestamp: Unix timestamp taken at the data's creation.
+ timestep: The timestep of the data.
+
+ Subclasses (e.g. `TimedAction`, `TimedObservation`) carry the actual payload.
+ """
+
+ timestamp: float
+ timestep: int
+
+ def get_timestamp(self):
+ return self.timestamp
+
+ def get_timestep(self):
+ return self.timestep
+
+
+@dataclass
+class TimedAction(TimedData):
+ action: Action
+
+ def get_action(self):
+ return self.action
+
+
+@dataclass
+class TimedObservation(TimedData):
+ observation: RawObservation
+ must_go: bool = False
+
+ def get_observation(self):
+ return self.observation
+
+
+@dataclass
+class FPSTracker:
+ """Utility class to track FPS metrics over time."""
+
+ target_fps: float
+ first_timestamp: float | None = None
+ total_obs_count: int = 0
+
+ def calculate_fps_metrics(self, current_timestamp: float) -> dict[str, float]:
+ """Calculate average FPS vs target"""
+ self.total_obs_count += 1
+
+ # Initialize first observation time
+ if self.first_timestamp is None:
+ self.first_timestamp = current_timestamp
+
+ # Calculate overall average FPS (since start)
+ total_duration = current_timestamp - self.first_timestamp
+ avg_fps = (self.total_obs_count - 1) / total_duration if total_duration > 1e-6 else 0.0
+
+ return {"avg_fps": avg_fps, "target_fps": self.target_fps}
+
+ def reset(self):
+ """Reset the FPS tracker state"""
+ self.first_timestamp = None
+ self.total_obs_count = 0
+
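+# NOTE: Illustrative sketch only: feeding a stream of timestamps into the tracker. With one
+# observation every 1/30 s, the running average converges to ~30 FPS.
+def _example_fps_tracker() -> dict[str, float]:
+    tracker = FPSTracker(target_fps=30)
+    metrics = {}
+    for step in range(10):
+        metrics = tracker.calculate_fps_metrics(current_timestamp=step / 30)
+    return metrics  # {"avg_fps": ~30.0, "target_fps": 30}
+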
+
+@dataclass
+class RemotePolicyConfig:
+ policy_type: str
+ pretrained_name_or_path: str
+ lerobot_features: dict[str, PolicyFeature]
+ actions_per_chunk: int
+ device: str = "cpu"
+
+
+def _compare_observation_states(obs1_state: torch.Tensor, obs2_state: torch.Tensor, atol: float) -> bool:
+ """Check if two observation states are similar, under a tolerance threshold"""
+ return bool(torch.linalg.norm(obs1_state - obs2_state) < atol)
+
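+# NOTE: Illustrative sketch only: the joint-space similarity check above, on made-up states.
+def _example_compare_observation_states() -> bool:
+    state_a = torch.tensor([0.0, 10.0, 20.0])
+    state_b = torch.tensor([0.1, 10.2, 20.1])
+    # L2 distance is ~0.245, below atol=1.0, so the states count as similar
+    return _compare_observation_states(state_a, state_b, atol=1.0)
+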
+
+def observations_similar(
+ obs1: TimedObservation, obs2: TimedObservation, lerobot_features: dict[str, dict], atol: float = 1
+) -> bool:
+ """Check if two observations are similar, under a tolerance threshold. Measures distance between
+ observations as the difference in joint-space between the two observations.
+
+ NOTE(fracapuano): This is a very simple check, and it is enough for the current use case.
+ An immediate next step is to use (fast) perceptual difference metrics comparing some camera views,
+ to surpass this joint-space similarity check.
+ """
+ obs1_state = extract_state_from_raw_observation(
+ make_lerobot_observation(obs1.get_observation(), lerobot_features)
+ )
+ obs2_state = extract_state_from_raw_observation(
+ make_lerobot_observation(obs2.get_observation(), lerobot_features)
+ )
+
+ return _compare_observation_states(obs1_state, obs2_state, atol=atol)
+
+
+def send_bytes_in_chunks(
+ buffer: bytes,
+ message_class: Any,
+ log_prefix: str = "",
+ silent: bool = True,
+ chunk_size: int = 3 * 1024 * 1024,
+):
+ # NOTE(fracapuano): Partially copied from lerobot.common.transport.utils.send_bytes_in_chunks. Duplication can't be avoided if we
+ # don't use a unique class for messages sent (due to the different transfer states sent). Also, I'd want more control over the
+ # chunk size as I am using it to send image observations.
+ buffer = io.BytesIO(buffer)
+ size_in_bytes = bytes_buffer_size(buffer)
+
+ sent_bytes = 0
+
+ logging_method = logging.info if not silent else logging.debug
+
+ logging_method(f"{log_prefix} Buffer size {size_in_bytes / 1024 / 1024} MB with")
+
+ while sent_bytes < size_in_bytes:
+ transfer_state = async_inference_pb2.TransferState.TRANSFER_MIDDLE
+
+ if sent_bytes + chunk_size >= size_in_bytes:
+ transfer_state = async_inference_pb2.TransferState.TRANSFER_END
+ elif sent_bytes == 0:
+ transfer_state = async_inference_pb2.TransferState.TRANSFER_BEGIN
+
+ size_to_read = min(chunk_size, size_in_bytes - sent_bytes)
+ chunk = buffer.read(size_to_read)
+
+ yield message_class(transfer_state=transfer_state, data=chunk)
+ sent_bytes += size_to_read
+ logging_method(f"{log_prefix} Sent {sent_bytes}/{size_in_bytes} bytes with state {transfer_state}")
+
+ logging_method(f"{log_prefix} Published {sent_bytes / 1024 / 1024} MB")
+
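+# NOTE: Illustrative sketch only: how the chunking generator splits a payload. The message
+# class below is a local stand-in for a proto message (any class accepting `transfer_state`
+# and `data` keyword arguments behaves the same way here).
+def _example_send_bytes_in_chunks() -> int:
+    @dataclass
+    class _FakeChunkMessage:
+        transfer_state: int
+        data: bytes
+
+    payload = b"x" * (7 * 1024 * 1024)  # 7 MB payload -> 3 chunks of at most 3 MB each
+    chunks = list(send_bytes_in_chunks(payload, _FakeChunkMessage, chunk_size=3 * 1024 * 1024))
+    assert sum(len(chunk.data) for chunk in chunks) == len(payload)
+    return len(chunks)  # 3
+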
+
+def receive_bytes_in_chunks(
+ iterator, continue_receiving: Event, logger: logging.Logger, log_prefix: str = ""
+): # type: ignore
+ # NOTE(fracapuano): Partially copied from lerobot.common.transport.utils.receive_bytes_in_chunks. Duplication can't be avoided if we
+ # don't use a unique class for messages sent (due to the different transfer states sent). Also, on the server side the logic for receiving
+ # is opposite to the HIL-SERL design (the event here signals that receiving should keep going, rather than signaling shutdown)
+ bytes_buffer = io.BytesIO()
+ step = 0
+
+ logger.info(f"{log_prefix} Starting receiver")
+ for item in iterator:
+ logger.debug(f"{log_prefix} Received item")
+ if not continue_receiving.is_set():
+ logger.info(f"{log_prefix} Shutting down receiver")
+ return
+
+ if item.transfer_state == async_inference_pb2.TransferState.TRANSFER_BEGIN:
+ bytes_buffer.seek(0)
+ bytes_buffer.truncate(0)
+ bytes_buffer.write(item.data)
+ logger.debug(f"{log_prefix} Received data at step 0")
+
+ elif item.transfer_state == async_inference_pb2.TransferState.TRANSFER_MIDDLE:
+ bytes_buffer.write(item.data)
+ step += 1
+ logger.debug(f"{log_prefix} Received data at step {step}")
+
+ elif item.transfer_state == async_inference_pb2.TransferState.TRANSFER_END:
+ bytes_buffer.write(item.data)
+ logger.debug(f"{log_prefix} Received data at step end size {bytes_buffer_size(bytes_buffer)}")
+
+ complete_bytes = bytes_buffer.getvalue()
+
+ bytes_buffer.seek(0)
+ bytes_buffer.truncate(0)
+
+ logger.debug(f"{log_prefix} Queue updated")
+ return complete_bytes
+
+ else:
+ logger.warning(f"{log_prefix} Received unknown transfer state {item.transfer_state}")
+ raise ValueError(f"Received unknown transfer state {item.transfer_state}")
diff --git a/src/lerobot/scripts/server/policy_server.py b/src/lerobot/scripts/server/policy_server.py
new file mode 100644
index 0000000000..13ba976e20
--- /dev/null
+++ b/src/lerobot/scripts/server/policy_server.py
@@ -0,0 +1,403 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Example:
+```shell
+python src/lerobot/scripts/server/policy_server.py \
+ --host=127.0.0.1 \
+ --port=8080 \
+ --fps=30 \
+ --inference_latency=0.033 \
+ --obs_queue_timeout=1
+```
+"""
+
+import logging
+import pickle # nosec
+import threading
+import time
+from concurrent import futures
+from dataclasses import asdict
+from pprint import pformat
+from queue import Empty, Queue
+
+import draccus
+import grpc
+import torch
+
+from lerobot.policies.factory import get_policy_class
+from lerobot.scripts.server.configs import PolicyServerConfig
+from lerobot.scripts.server.constants import SUPPORTED_POLICIES
+from lerobot.scripts.server.helpers import (
+ FPSTracker,
+ Observation,
+ RemotePolicyConfig,
+ TimedAction,
+ TimedObservation,
+ get_logger,
+ observations_similar,
+ raw_observation_to_observation,
+ receive_bytes_in_chunks,
+)
+from lerobot.transport import (
+ async_inference_pb2, # type: ignore
+ async_inference_pb2_grpc, # type: ignore
+)
+
+
+class PolicyServer(async_inference_pb2_grpc.AsyncInferenceServicer):
+ prefix = "policy_server"
+ logger = get_logger(prefix)
+
+ def __init__(self, config: PolicyServerConfig):
+ self.config = config
+ self._running_event = threading.Event()
+
+ # FPS measurement
+ self.fps_tracker = FPSTracker(target_fps=config.fps)
+
+ self.observation_queue = Queue(maxsize=1)
+
+ self._predicted_timesteps_lock = threading.Lock()
+ self._predicted_timesteps = set()
+
+ self.last_processed_obs = None
+
+ # Attributes will be set by SendPolicyInstructions
+ self.device = None
+ self.policy_type = None
+ self.lerobot_features = None
+ self.actions_per_chunk = None
+ self.policy = None
+
+ @property
+ def running(self):
+ return self._running_event.is_set()
+
+ @property
+ def policy_image_features(self):
+ return self.policy.config.image_features
+
+ def _reset_server(self) -> None:
+ """Flushes server state when new client connects."""
+ # only running inference on the latest observation received by the server
+ self._running_event.clear()
+ self.observation_queue = Queue(maxsize=1)
+
+ with self._predicted_timesteps_lock:
+ self._predicted_timesteps = set()
+
+ def Ready(self, request, context): # noqa: N802
+ client_id = context.peer()
+ self.logger.info(f"Client {client_id} connected and ready")
+ self._reset_server()
+ self._running_event.set()
+
+ return async_inference_pb2.Empty()
+
+ def SendPolicyInstructions(self, request, context): # noqa: N802
+ """Receive policy instructions from the robot client"""
+
+ if not self.running:
+ self.logger.warning("Server is not running. Ignoring policy instructions.")
+ return async_inference_pb2.Empty()
+
+ client_id = context.peer()
+
+ policy_specs = pickle.loads(request.data) # nosec
+
+ if not isinstance(policy_specs, RemotePolicyConfig):
+ raise TypeError(f"Policy specs must be a RemotePolicyConfig. Got {type(policy_specs)}")
+
+ if policy_specs.policy_type not in SUPPORTED_POLICIES:
+ raise ValueError(
+ f"Policy type {policy_specs.policy_type} not supported. "
+ f"Supported policies: {SUPPORTED_POLICIES}"
+ )
+
+ self.logger.info(
+ f"Receiving policy instructions from {client_id} | "
+ f"Policy type: {policy_specs.policy_type} | "
+ f"Pretrained name or path: {policy_specs.pretrained_name_or_path} | "
+ f"Actions per chunk: {policy_specs.actions_per_chunk} | "
+ f"Device: {policy_specs.device}"
+ )
+
+ self.device = policy_specs.device
+ self.policy_type = policy_specs.policy_type # act, pi0, etc.
+ self.lerobot_features = policy_specs.lerobot_features
+ self.actions_per_chunk = policy_specs.actions_per_chunk
+
+ policy_class = get_policy_class(self.policy_type)
+
+ start = time.perf_counter()
+ self.policy = policy_class.from_pretrained(policy_specs.pretrained_name_or_path)
+ self.policy.to(self.device)
+ end = time.perf_counter()
+
+ self.logger.info(f"Time taken to put policy on {self.device}: {end - start:.4f} seconds")
+
+ return async_inference_pb2.Empty()
+
+ def SendObservations(self, request_iterator, context): # noqa: N802
+ """Receive observations from the robot client"""
+ client_id = context.peer()
+ self.logger.debug(f"Receiving observations from {client_id}")
+
+ receive_time = time.time() # comparing timestamps so need time.time()
+ start_deserialize = time.perf_counter()
+ received_bytes = receive_bytes_in_chunks(
+ request_iterator, self._running_event, self.logger
+ ) # blocking call while looping over request_iterator
+ timed_observation = pickle.loads(received_bytes) # nosec
+ deserialize_time = time.perf_counter() - start_deserialize
+
+ self.logger.debug(f"Received observation #{timed_observation.get_timestep()}")
+
+ obs_timestep = timed_observation.get_timestep()
+ obs_timestamp = timed_observation.get_timestamp()
+
+ # Calculate FPS metrics
+ fps_metrics = self.fps_tracker.calculate_fps_metrics(obs_timestamp)
+
+ self.logger.info(
+ f"Received observation #{obs_timestep} | "
+ f"Avg FPS: {fps_metrics['avg_fps']:.2f} | " # fps at which observations are received from client
+ f"Target: {fps_metrics['target_fps']:.2f} | "
+ f"One-way latency: {(receive_time - obs_timestamp) * 1000:.2f}ms"
+ )
+
+ self.logger.debug(
+ f"Server timestamp: {receive_time:.6f} | "
+ f"Client timestamp: {obs_timestamp:.6f} | "
+ f"Deserialization time: {deserialize_time:.6f}s"
+ )
+
+ if not self._enqueue_observation(
+ timed_observation # wrapping a RawObservation
+ ):
+ self.logger.info(f"Observation #{obs_timestep} has been filtered out")
+
+ return async_inference_pb2.Empty()
+
+ def GetActions(self, request, context): # noqa: N802
+ """Returns actions to the robot client. Actions are sent as a single
+ chunk, containing multiple actions."""
+ client_id = context.peer()
+ self.logger.debug(f"Client {client_id} connected for action streaming")
+
+ # Generate action based on the most recent observation and its timestep
+ try:
+ getactions_starts = time.perf_counter()
+ obs = self.observation_queue.get(timeout=self.config.obs_queue_timeout)
+ self.logger.info(
+ f"Running inference for observation #{obs.get_timestep()} (must_go: {obs.must_go})"
+ )
+
+ with self._predicted_timesteps_lock:
+ self._predicted_timesteps.add(obs.get_timestep())
+
+ start_time = time.perf_counter()
+ action_chunk = self._predict_action_chunk(obs)
+ inference_time = time.perf_counter() - start_time
+
+ start_time = time.perf_counter()
+ actions_bytes = pickle.dumps(action_chunk) # nosec
+ serialize_time = time.perf_counter() - start_time
+
+ # Create and return the action chunk
+ actions = async_inference_pb2.Actions(data=actions_bytes)
+
+ self.logger.info(
+ f"Action chunk #{obs.get_timestep()} generated | "
+ f"Total time: {(inference_time + serialize_time) * 1000:.2f}ms"
+ )
+
+ self.logger.debug(
+ f"Action chunk #{obs.get_timestep()} generated | "
+ f"Inference time: {inference_time:.2f}s |"
+ f"Serialize time: {serialize_time:.2f}s |"
+ f"Total time: {inference_time + serialize_time:.2f}s"
+ )
+
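+ # Pad this call so that GetActions takes at least `config.inference_latency` seconds end to end,
+ # pacing how quickly new action chunks are returned to the client.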
+ time.sleep(
+ max(0, self.config.inference_latency - max(0, time.perf_counter() - getactions_starts))
+ ) # sleep controls inference latency
+
+ return actions
+
+ except Empty: # no observation added to queue in obs_queue_timeout
+ return async_inference_pb2.Empty()
+
+ except Exception as e:
+ self.logger.error(f"Error in StreamActions: {e}")
+
+ return async_inference_pb2.Empty()
+
+ def _obs_sanity_checks(self, obs: TimedObservation, previous_obs: TimedObservation) -> bool:
+ """Check if the observation is valid to be processed by the policy"""
+ with self._predicted_timesteps_lock:
+ predicted_timesteps = self._predicted_timesteps
+
+ if obs.get_timestep() in predicted_timesteps:
+ self.logger.debug(f"Skipping observation #{obs.get_timestep()} - Timestep predicted already!")
+ return False
+
+ elif observations_similar(obs, previous_obs, lerobot_features=self.lerobot_features):
+ self.logger.debug(
+ f"Skipping observation #{obs.get_timestep()} - Observation too similar to last obs predicted!"
+ )
+ return False
+
+ else:
+ return True
+
+ def _enqueue_observation(self, obs: TimedObservation) -> bool:
+ """Enqueue an observation if it must go through processing, otherwise skip it.
+ Observations not in queue are never run through the policy network"""
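+ # An observation is enqueued when it is flagged must-go (e.g. the client's action queue ran empty),
+ # when no observation has been processed yet, or when it passes the sanity checks
+ # (timestep not predicted already and sufficiently different from the last processed observation).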
+
+ if (
+ obs.must_go
+ or self.last_processed_obs is None
+ or self._obs_sanity_checks(obs, self.last_processed_obs)
+ ):
+ last_obs = self.last_processed_obs.get_timestep() if self.last_processed_obs else "None"
+ self.logger.debug(
+ f"Enqueuing observation. Must go: {obs.must_go} | Last processed obs: {last_obs}"
+ )
+
+ # If queue is full, get the old observation to make room
+ if self.observation_queue.full():
+ # pops from queue
+ _ = self.observation_queue.get_nowait()
+ self.logger.debug("Observation queue was full, removed oldest observation")
+
+ # Now put the new observation (never blocks as queue is non-full here)
+ self.observation_queue.put(obs)
+ return True
+
+ return False
+
+ def _time_action_chunk(self, t_0: float, action_chunk: list[torch.Tensor], i_0: int) -> list[TimedAction]:
+ """Turn a chunk of actions into a list of TimedAction instances,
+ with the first action corresponding to t_0 and the rest corresponding to
+ t_0 + i*environment_dt for i in range(len(action_chunk))
+ """
+ return [
+ TimedAction(timestamp=t_0 + i * self.config.environment_dt, timestep=i_0 + i, action=action)
+ for i, action in enumerate(action_chunk)
+ ]
+
+ def _prepare_observation(self, observation_t: TimedObservation) -> Observation:
+ """
+ Prepare observation, ready for policy inference.
+ E.g.: to keep the observation sampling rate high (and network packets small), the client sends
+ uint8 [0, 255] images, which are converted here to float32 [0, 1] images before running inference.
+ """
+ # RawObservation from robot.get_observation() - wrong keys, wrong dtype, wrong image shape
+ observation: Observation = raw_observation_to_observation(
+ observation_t.get_observation(),
+ self.lerobot_features,
+ self.policy_image_features,
+ self.device,
+ )
+ # processed Observation - right keys, right dtype, right image shape
+
+ return observation
+
+ def _get_action_chunk(self, observation: dict[str, torch.Tensor]) -> torch.Tensor:
+ """Get an action chunk from the policy. The chunk contains only"""
+ chunk = self.policy.predict_action_chunk(observation)
+ if chunk.ndim != 3:
+ chunk = chunk.unsqueeze(0) # adding batch dimension, now shape is (B, chunk_size, action_dim)
+
+ return chunk[:, : self.actions_per_chunk, :]
+
+ def _predict_action_chunk(self, observation_t: TimedObservation) -> list[TimedAction]:
+ """Predict an action chunk based on an observation"""
+ inference_starts = time.perf_counter()
+
+ """1. Prepare observation"""
+ start_time = time.perf_counter()
+ observation = self._prepare_observation(observation_t)
+ preprocessing_time = time.perf_counter() - start_time
+
+ self.last_processed_obs: TimedObservation = observation_t
+
+ """2. Get action chunk"""
+ start_time = time.perf_counter()
+ action_tensor = self._get_action_chunk(observation)
+ inference_time = time.perf_counter() - start_time
+
+ """3. Post-inference processing"""
+ start_time = time.perf_counter()
+ # Move to CPU before serializing
+ action_tensor = action_tensor.cpu().squeeze(0)
+
+ action_chunk = self._time_action_chunk(
+ observation_t.get_timestamp(), list(action_tensor), observation_t.get_timestep()
+ )
+ postprocessing_time = time.perf_counter() - start_time
+ inference_stops = time.perf_counter()
+
+ self.logger.info(
+ f"Observation {observation_t.get_timestep()} |"
+ f"Inference time: {1000 * (inference_stops - inference_starts):.2f}ms"
+ )
+
+ # full-process latency breakdown for debugging purposes
+ self.logger.debug(
+ f"Observation {observation_t.get_timestep()} | "
+ f"Preprocessing time: {1000 * (preprocessing_time - inference_starts):.2f}ms | "
+ f"Inference time: {1000 * (inference_time - preprocessing_time):.2f}ms | "
+ f"Postprocessing time: {1000 * (postprocessing_time - inference_time):.2f}ms | "
+ f"Total time: {1000 * (postprocessing_time - inference_starts):.2f}ms"
+ )
+
+ return action_chunk
+
+ def stop(self):
+ """Stop the server"""
+ self._reset_server()
+ self.logger.info("Server stopping...")
+
+
+@draccus.wrap()
+def serve(cfg: PolicyServerConfig):
+ """Start the PolicyServer with the given configuration.
+
+ Args:
+ cfg: PolicyServerConfig instance, parsed from the command line by draccus.
+ """
+ logging.info(pformat(asdict(cfg)))
+
+ # Create the server instance first
+ policy_server = PolicyServer(cfg)
+
+ # Setup and start gRPC server
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
+ async_inference_pb2_grpc.add_AsyncInferenceServicer_to_server(policy_server, server)
+ server.add_insecure_port(f"{cfg.host}:{cfg.port}")
+
+ policy_server.logger.info(f"PolicyServer started on {cfg.host}:{cfg.port}")
+ server.start()
+
+ server.wait_for_termination()
+
+ policy_server.logger.info("Server terminated")
+
+
+if __name__ == "__main__":
+ serve()
diff --git a/src/lerobot/scripts/server/robot_client.py b/src/lerobot/scripts/server/robot_client.py
new file mode 100644
index 0000000000..68166de6f8
--- /dev/null
+++ b/src/lerobot/scripts/server/robot_client.py
@@ -0,0 +1,513 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Example command:
+```shell
+python src/lerobot/scripts/server/robot_client.py \
+ --robot.type=so100_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
+ --robot.id=black \
+ --task="dummy" \
+ --server_address=127.0.0.1:8080 \
+ --policy_type=act \
+ --pretrained_name_or_path=user/model \
+ --policy_device=mps \
+ --actions_per_chunk=50 \
+ --chunk_size_threshold=0.5 \
+ --aggregate_fn_name=weighted_average \
+ --debug_visualize_queue_size=True
+```
+"""
+
+import logging
+import pickle # nosec
+import threading
+import time
+from collections.abc import Callable
+from dataclasses import asdict
+from pprint import pformat
+from queue import Queue
+from typing import Any
+
+import draccus
+import grpc
+import torch
+
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401
+from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.robots import ( # noqa: F401
+ Robot,
+ RobotConfig,
+ koch_follower,
+ make_robot_from_config,
+ so100_follower,
+ so101_follower,
+)
+from lerobot.scripts.server.configs import RobotClientConfig
+from lerobot.scripts.server.constants import SUPPORTED_ROBOTS
+from lerobot.scripts.server.helpers import (
+ Action,
+ FPSTracker,
+ Observation,
+ RawObservation,
+ RemotePolicyConfig,
+ TimedAction,
+ TimedObservation,
+ get_logger,
+ map_robot_keys_to_lerobot_features,
+ send_bytes_in_chunks,
+ validate_robot_cameras_for_policy,
+ visualize_action_queue_size,
+)
+from lerobot.transport import (
+ async_inference_pb2, # type: ignore
+ async_inference_pb2_grpc, # type: ignore
+)
+from lerobot.transport.utils import grpc_channel_options
+
+
+class RobotClient:
+ prefix = "robot_client"
+ logger = get_logger(prefix)
+
+ def __init__(self, config: RobotClientConfig):
+ """Initialize RobotClient with unified configuration.
+
+ Args:
+ config: RobotClientConfig containing all configuration parameters
+ """
+ # Store configuration
+ self.config = config
+ self.robot = make_robot_from_config(config.robot)
+ self.robot.connect()
+
+ lerobot_features = map_robot_keys_to_lerobot_features(self.robot)
+
+ if config.verify_robot_cameras:
+ # Load policy config for validation
+ policy_config = PreTrainedConfig.from_pretrained(config.pretrained_name_or_path)
+ policy_image_features = policy_config.image_features
+
+ # The cameras specified for inference must match the one supported by the policy chosen
+ validate_robot_cameras_for_policy(lerobot_features, policy_image_features)
+
+ # Server address to connect to, taken from the client configuration
+ self.server_address = config.server_address
+
+ self.policy_config = RemotePolicyConfig(
+ config.policy_type,
+ config.pretrained_name_or_path,
+ lerobot_features,
+ config.actions_per_chunk,
+ config.policy_device,
+ )
+ self.channel = grpc.insecure_channel(
+ self.server_address, grpc_channel_options(initial_backoff=f"{config.environment_dt:.4f}s")
+ )
+ self.stub = async_inference_pb2_grpc.AsyncInferenceStub(self.channel)
+ self.logger.info(f"Initializing client to connect to server at {self.server_address}")
+
+ self._running_event = threading.Event()
+
+ # Initialize client side variables
+ self.latest_action_lock = threading.Lock()
+ self.latest_action = -1
+ self.action_chunk_size = -1
+
+ self._chunk_size_threshold = config.chunk_size_threshold
+
+ self.action_queue = Queue()
+ self.action_queue_lock = threading.Lock() # Protect queue operations
+ self.action_queue_size = []
+ self.start_barrier = threading.Barrier(2) # 2 threads: action receiver, control loop
+
+ # FPS measurement
+ self.fps_tracker = FPSTracker(target_fps=self.config.fps)
+
+ self.logger.info("Robot connected and ready")
+
+ # Use an event for thread-safe coordination
+ self.must_go = threading.Event()
+ self.must_go.set() # Initially set - observations qualify for direct processing
+
+ @property
+ def running(self):
+ return self._running_event.is_set()
+
+ def start(self):
+ """Start the robot client and connect to the policy server"""
+ try:
+ # client-server handshake
+ start_time = time.perf_counter()
+ self.stub.Ready(async_inference_pb2.Empty())
+ end_time = time.perf_counter()
+ self.logger.debug(f"Connected to policy server in {end_time - start_time:.4f}s")
+
+ # send policy instructions
+ policy_config_bytes = pickle.dumps(self.policy_config)
+ policy_setup = async_inference_pb2.PolicySetup(data=policy_config_bytes)
+
+ self.logger.info("Sending policy instructions to policy server")
+ self.logger.debug(
+ f"Policy type: {self.policy_config.policy_type} | "
+ f"Pretrained name or path: {self.policy_config.pretrained_name_or_path} | "
+ f"Device: {self.policy_config.device}"
+ )
+
+ self.stub.SendPolicyInstructions(policy_setup)
+
+ self._running_event.set()
+
+ return True
+
+ except grpc.RpcError as e:
+ self.logger.error(f"Failed to connect to policy server: {e}")
+ return False
+
+ def stop(self):
+ """Stop the robot client"""
+ self._running_event.clear()
+
+ self.robot.disconnect()
+ self.logger.debug("Robot disconnected")
+
+ self.channel.close()
+ self.logger.debug("Client stopped, channel closed")
+
+ def send_observation(
+ self,
+ obs: TimedObservation,
+ ) -> bool:
+ """Send observation to the policy server.
+ Returns True if the observation was sent successfully, False otherwise."""
+ if not self.running:
+ raise RuntimeError("Client not running. Run RobotClient.start() before sending observations.")
+
+ if not isinstance(obs, TimedObservation):
+ raise ValueError("Input observation needs to be a TimedObservation!")
+
+ start_time = time.perf_counter()
+ observation_bytes = pickle.dumps(obs)
+ serialize_time = time.perf_counter() - start_time
+ self.logger.debug(f"Observation serialization time: {serialize_time:.6f}s")
+
+ try:
+ observation_iterator = send_bytes_in_chunks(
+ observation_bytes,
+ async_inference_pb2.Observation,
+ log_prefix="[CLIENT] Observation",
+ silent=True,
+ )
+ _ = self.stub.SendObservations(observation_iterator)
+ obs_timestep = obs.get_timestep()
+ self.logger.info(f"Sent observation #{obs_timestep} | ")
+
+ return True
+
+ except grpc.RpcError as e:
+ self.logger.error(f"Error sending observation #{obs.get_timestep()}: {e}")
+ return False
+
+ def _inspect_action_queue(self):
+ with self.action_queue_lock:
+ queue_size = self.action_queue.qsize()
+ timestamps = sorted([action.get_timestep() for action in self.action_queue.queue])
+ self.logger.debug(f"Queue size: {queue_size}, Queue contents: {timestamps}")
+ return queue_size, timestamps
+
+ def _aggregate_action_queues(
+ self,
+ incoming_actions: list[TimedAction],
+ aggregate_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None,
+ ):
+ """Finds the same timestep actions in the queue and aggregates them using the aggregate_fn"""
+ if aggregate_fn is None:
+ # default aggregate function: take the latest action
+ def aggregate_fn(x1, x2):
+ return x2
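+ # A weighted blend such as `lambda x1, x2: 0.3 * x1 + 0.7 * x2` (weights purely illustrative)
+ # is a hypothetical alternative; the client config's `aggregate_fn_name` presumably selects
+ # such a function.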
+
+ future_action_queue = Queue()
+ with self.action_queue_lock:
+ internal_queue = self.action_queue.queue
+
+ current_action_queue = {action.get_timestep(): action.get_action() for action in internal_queue}
+
+ for new_action in incoming_actions:
+ with self.latest_action_lock:
+ latest_action = self.latest_action
+
+ # New action is older than the latest action in the queue, skip it
+ if new_action.get_timestep() <= latest_action:
+ continue
+
+ # If the new action's timestep is not in the current action queue, add it directly
+ elif new_action.get_timestep() not in current_action_queue:
+ future_action_queue.put(new_action)
+ continue
+
+ # If the new action's timestep is in the current action queue, aggregate it
+ # TODO: There is probably a way to do this with broadcasting of the two action tensors
+ future_action_queue.put(
+ TimedAction(
+ timestamp=new_action.get_timestamp(),
+ timestep=new_action.get_timestep(),
+ action=aggregate_fn(
+ current_action_queue[new_action.get_timestep()], new_action.get_action()
+ ),
+ )
+ )
+
+ with self.action_queue_lock:
+ self.action_queue = future_action_queue
+
+ def receive_actions(self, verbose: bool = False):
+ """Receive actions from the policy server"""
+ # Wait at barrier for synchronized start
+ self.start_barrier.wait()
+ self.logger.info("Action receiving thread starting")
+
+ while self.running:
+ try:
+ # Use StreamActions to get a stream of actions from the server
+ actions_chunk = self.stub.GetActions(async_inference_pb2.Empty())
+ if len(actions_chunk.data) == 0:
+ continue # received `Empty` from server, wait for next call
+
+ receive_time = time.time()
+
+ # Deserialize bytes back into list[TimedAction]
+ deserialize_start = time.perf_counter()
+ timed_actions = pickle.loads(actions_chunk.data) # nosec
+ deserialize_time = time.perf_counter() - deserialize_start
+
+ self.action_chunk_size = max(self.action_chunk_size, len(timed_actions))
+
+ # Calculate network latency if we have matching observations
+ if len(timed_actions) > 0 and verbose:
+ with self.latest_action_lock:
+ latest_action = self.latest_action
+
+ self.logger.debug(f"Current latest action: {latest_action}")
+
+ # Get queue state before changes
+ old_size, old_timesteps = self._inspect_action_queue()
+ if not old_timesteps:
+ old_timesteps = [latest_action] # queue was empty
+
+ # Log incoming actions
+ incoming_timesteps = [a.get_timestep() for a in timed_actions]
+
+ first_action_timestep = timed_actions[0].get_timestep()
+ server_to_client_latency = (receive_time - timed_actions[0].get_timestamp()) * 1000
+
+ self.logger.info(
+ f"Received action chunk for step #{first_action_timestep} | "
+ f"Latest action: #{latest_action} | "
+ f"Incoming actions: {incoming_timesteps[0]}:{incoming_timesteps[-1]} | "
+ f"Network latency (server->client): {server_to_client_latency:.2f}ms | "
+ f"Deserialization time: {deserialize_time * 1000:.2f}ms"
+ )
+
+ # Update action queue
+ start_time = time.perf_counter()
+ self._aggregate_action_queues(timed_actions, self.config.aggregate_fn)
+ queue_update_time = time.perf_counter() - start_time
+
+ self.must_go.set() # after receiving actions, next empty queue triggers must-go processing!
+
+ if verbose:
+ # Get queue state after changes
+ new_size, new_timesteps = self._inspect_action_queue()
+
+ with self.latest_action_lock:
+ latest_action = self.latest_action
+
+ self.logger.info(
+ f"Latest action: {latest_action} | "
+ f"Old action steps: {old_timesteps[0]}:{old_timesteps[-1]} | "
+ f"Incoming action steps: {incoming_timesteps[0]}:{incoming_timesteps[-1]} | "
+ f"Updated action steps: {new_timesteps[0]}:{new_timesteps[-1]}"
+ )
+ self.logger.debug(
+ f"Queue update complete ({queue_update_time:.6f}s) | "
+ f"Before: {old_size} items | "
+ f"After: {new_size} items | "
+ )
+
+ except grpc.RpcError as e:
+ self.logger.error(f"Error receiving actions: {e}")
+
+ def actions_available(self):
+ """Check if there are actions available in the queue"""
+ with self.action_queue_lock:
+ return not self.action_queue.empty()
+
+ def _action_tensor_to_action_dict(self, action_tensor: torch.Tensor) -> dict[str, float]:
+ action = {key: action_tensor[i].item() for i, key in enumerate(self.robot.action_features)}
+ return action
+
+ def control_loop_action(self, verbose: bool = False) -> dict[str, Any]:
+ """Reading and performing actions in local queue"""
+
+ # Lock only for queue operations
+ get_start = time.perf_counter()
+ with self.action_queue_lock:
+ self.action_queue_size.append(self.action_queue.qsize())
+ # Get action from queue
+ timed_action = self.action_queue.get_nowait()
+ get_end = time.perf_counter() - get_start
+
+ _performed_action = self.robot.send_action(
+ self._action_tensor_to_action_dict(timed_action.get_action())
+ )
+ with self.latest_action_lock:
+ self.latest_action = timed_action.get_timestep()
+
+ if verbose:
+ with self.action_queue_lock:
+ current_queue_size = self.action_queue.qsize()
+
+ self.logger.debug(
+ f"Ts={timed_action.get_timestamp()} | "
+ f"Action #{timed_action.get_timestep()} performed | "
+ f"Queue size: {current_queue_size}"
+ )
+
+ self.logger.debug(
+ f"Popping action from queue to perform took {get_end:.6f}s | Queue size: {current_queue_size}"
+ )
+
+ return _performed_action
+
+ def _ready_to_send_observation(self):
+ """Flags when the client is ready to send an observation"""
+ with self.action_queue_lock:
+ return self.action_queue.qsize() / self.action_chunk_size <= self._chunk_size_threshold
+
+ def control_loop_observation(self, task: str, verbose: bool = False) -> RawObservation:
+ try:
+ # Get serialized observation bytes from the function
+ start_time = time.perf_counter()
+
+ raw_observation: RawObservation = self.robot.get_observation()
+ raw_observation["task"] = task
+
+ with self.latest_action_lock:
+ latest_action = self.latest_action
+
+ observation = TimedObservation(
+ timestamp=time.time(), # need time.time() to compare timestamps across client and server
+ observation=raw_observation,
+ timestep=max(latest_action, 0),
+ )
+
+ obs_capture_time = time.perf_counter() - start_time
+
+ # If there are no actions left in the queue, the observation must go through processing!
+ with self.action_queue_lock:
+ observation.must_go = self.must_go.is_set() and self.action_queue.empty()
+ current_queue_size = self.action_queue.qsize()
+
+ _ = self.send_observation(observation)
+
+ self.logger.debug(f"QUEUE SIZE: {current_queue_size} (Must go: {observation.must_go})")
+ if observation.must_go:
+ # must-go event will be set again after receiving actions
+ self.must_go.clear()
+
+ if verbose:
+ # Calculate comprehensive FPS metrics
+ fps_metrics = self.fps_tracker.calculate_fps_metrics(observation.get_timestamp())
+
+ self.logger.info(
+ f"Obs #{observation.get_timestep()} | "
+ f"Avg FPS: {fps_metrics['avg_fps']:.2f} | "
+ f"Target: {fps_metrics['target_fps']:.2f}"
+ )
+
+ self.logger.debug(
+ f"Ts={observation.get_timestamp():.6f} | Capturing observation took {obs_capture_time:.6f}s"
+ )
+
+ return raw_observation
+
+ except Exception as e:
+ self.logger.error(f"Error in observation sender: {e}")
+
+ def control_loop(self, task: str, verbose: bool = False) -> tuple[Observation, Action]:
+ """Combined function for executing actions and streaming observations"""
+ # Wait at barrier for synchronized start
+ self.start_barrier.wait()
+ self.logger.info("Control loop thread starting")
+
+ _performed_action = None
+ _captured_observation = None
+
+ while self.running:
+ control_loop_start = time.perf_counter()
+ """Control loop: (1) Performing actions, when available"""
+ if self.actions_available():
+ _performed_action = self.control_loop_action(verbose)
+
+ """Control loop: (2) Streaming observations to the remote policy server"""
+ if self._ready_to_send_observation():
+ _captured_observation = self.control_loop_observation(task, verbose)
+
+ self.logger.info(f"Control loop (ms): {(time.perf_counter() - control_loop_start) * 1000:.2f}")
+ # Dynamically adjust sleep time to maintain the desired control frequency
+ time.sleep(max(0, self.config.environment_dt - (time.perf_counter() - control_loop_start)))
+
+ return _captured_observation, _performed_action
+
+
+@draccus.wrap()
+def async_client(cfg: RobotClientConfig):
+ logging.info(pformat(asdict(cfg)))
+
+ if cfg.robot.type not in SUPPORTED_ROBOTS:
+ raise ValueError(f"Robot {cfg.robot.type} not yet supported!")
+
+ client = RobotClient(cfg)
+
+ if client.start():
+ client.logger.info("Starting action receiver thread...")
+
+ # Create and start action receiver thread
+ action_receiver_thread = threading.Thread(target=client.receive_actions, daemon=True)
+
+ # Start action receiver thread
+ action_receiver_thread.start()
+
+ try:
+ # The main thread runs the control loop
+ client.control_loop(task=cfg.task)
+
+ finally:
+ client.stop()
+ action_receiver_thread.join()
+ if cfg.debug_visualize_queue_size:
+ visualize_action_queue_size(client.action_queue_size)
+ client.logger.info("Client stopped")
+
+
+if __name__ == "__main__":
+ async_client() # run the client
diff --git a/src/lerobot/scripts/train.py b/src/lerobot/scripts/train.py
new file mode 100644
index 0000000000..f09d231a84
--- /dev/null
+++ b/src/lerobot/scripts/train.py
@@ -0,0 +1,291 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import time
+from contextlib import nullcontext
+from pprint import pformat
+from typing import Any
+
+import torch
+from termcolor import colored
+from torch.amp import GradScaler
+from torch.optim import Optimizer
+
+from lerobot.configs import parser
+from lerobot.configs.train import TrainPipelineConfig
+from lerobot.datasets.factory import make_dataset
+from lerobot.datasets.sampler import EpisodeAwareSampler
+from lerobot.datasets.utils import cycle
+from lerobot.envs.factory import make_env
+from lerobot.optim.factory import make_optimizer_and_scheduler
+from lerobot.policies.factory import make_policy
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.utils import get_device_from_parameters
+from lerobot.scripts.eval import eval_policy
+from lerobot.utils.logging_utils import AverageMeter, MetricsTracker
+from lerobot.utils.random_utils import set_seed
+from lerobot.utils.train_utils import (
+ get_step_checkpoint_dir,
+ get_step_identifier,
+ load_training_state,
+ save_checkpoint,
+ update_last_checkpoint,
+)
+from lerobot.utils.utils import (
+ format_big_number,
+ get_safe_torch_device,
+ has_method,
+ init_logging,
+)
+from lerobot.utils.wandb_utils import WandBLogger
+
+
+def update_policy(
+ train_metrics: MetricsTracker,
+ policy: PreTrainedPolicy,
+ batch: Any,
+ optimizer: Optimizer,
+ grad_clip_norm: float,
+ grad_scaler: GradScaler,
+ lr_scheduler=None,
+ use_amp: bool = False,
+ lock=None,
+) -> tuple[MetricsTracker, dict]:
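+ """Run one gradient update on a batch: forward pass, scaled backward pass, gradient clipping,
+ optimizer step, and optional LR-scheduler step."""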
+ start_time = time.perf_counter()
+ device = get_device_from_parameters(policy)
+ policy.train()
+ with torch.autocast(device_type=device.type) if use_amp else nullcontext():
+ loss, output_dict = policy.forward(batch)
+ # TODO(rcadene): policy.unnormalize_outputs(out_dict)
+ grad_scaler.scale(loss).backward()
+
+ # Unscale the gradient of the optimizer's assigned params in-place **prior to gradient clipping**.
+ grad_scaler.unscale_(optimizer)
+
+ grad_norm = torch.nn.utils.clip_grad_norm_(
+ policy.parameters(),
+ grad_clip_norm,
+ error_if_nonfinite=False,
+ )
+
+ # Optimizer's gradients are already unscaled, so scaler.step does not unscale them,
+ # although it still skips optimizer.step() if the gradients contain infs or NaNs.
+ with lock if lock is not None else nullcontext():
+ grad_scaler.step(optimizer)
+ # Updates the scale for next iteration.
+ grad_scaler.update()
+
+ optimizer.zero_grad()
+
+ # Step through pytorch scheduler at every batch instead of epoch
+ if lr_scheduler is not None:
+ lr_scheduler.step()
+
+ if has_method(policy, "update"):
+ # To possibly update an internal buffer (for instance an Exponential Moving Average like in TDMPC).
+ policy.update()
+
+ train_metrics.loss = loss.item()
+ train_metrics.grad_norm = grad_norm.item()
+ train_metrics.lr = optimizer.param_groups[0]["lr"]
+ train_metrics.update_s = time.perf_counter() - start_time
+ return train_metrics, output_dict
+
+
+@parser.wrap()
+def train(cfg: TrainPipelineConfig):
+ cfg.validate()
+ logging.info(pformat(cfg.to_dict()))
+
+ if cfg.wandb.enable and cfg.wandb.project:
+ wandb_logger = WandBLogger(cfg)
+ else:
+ wandb_logger = None
+ logging.info(colored("Logs will be saved locally.", "yellow", attrs=["bold"]))
+
+ if cfg.seed is not None:
+ set_seed(cfg.seed)
+
+ # Check device is available
+ device = get_safe_torch_device(cfg.policy.device, log=True)
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+
+ logging.info("Creating dataset")
+ dataset = make_dataset(cfg)
+
+ # Create environment used for evaluating checkpoints during training on simulation data.
+ # On real-world data, there is no need to create an environment, as evaluations are done outside
+ # train.py using eval.py instead, with the gym_dora environment and dora-rs.
+ eval_env = None
+ if cfg.eval_freq > 0 and cfg.env is not None:
+ logging.info("Creating env")
+ eval_env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs)
+
+ logging.info("Creating policy")
+ policy = make_policy(
+ cfg=cfg.policy,
+ ds_meta=dataset.meta,
+ )
+
+ logging.info("Creating optimizer and scheduler")
+ optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)
+ grad_scaler = GradScaler(device.type, enabled=cfg.policy.use_amp)
+
+ step = 0 # number of policy updates (forward + backward + optim)
+
+ if cfg.resume:
+ step, optimizer, lr_scheduler = load_training_state(cfg.checkpoint_path, optimizer, lr_scheduler)
+
+ num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad)
+ num_total_params = sum(p.numel() for p in policy.parameters())
+
+ logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}")
+ if cfg.env is not None:
+ logging.info(f"{cfg.env.task=}")
+ logging.info(f"{cfg.steps=} ({format_big_number(cfg.steps)})")
+ logging.info(f"{dataset.num_frames=} ({format_big_number(dataset.num_frames)})")
+ logging.info(f"{dataset.num_episodes=}")
+ logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})")
+ logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})")
+
+ # create dataloader for offline training
+ if hasattr(cfg.policy, "drop_n_last_frames"):
+ shuffle = False
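+ # DataLoader shuffling is disabled here because EpisodeAwareSampler below shuffles internally.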
+ sampler = EpisodeAwareSampler(
+ dataset.episode_data_index,
+ drop_n_last_frames=cfg.policy.drop_n_last_frames,
+ shuffle=True,
+ )
+ else:
+ shuffle = True
+ sampler = None
+
+ dataloader = torch.utils.data.DataLoader(
+ dataset,
+ num_workers=cfg.num_workers,
+ batch_size=cfg.batch_size,
+ shuffle=shuffle,
+ sampler=sampler,
+ pin_memory=device.type == "cuda",
+ drop_last=False,
+ )
+ dl_iter = cycle(dataloader)
+
+ policy.train()
+
+ train_metrics = {
+ "loss": AverageMeter("loss", ":.3f"),
+ "grad_norm": AverageMeter("grdn", ":.3f"),
+ "lr": AverageMeter("lr", ":0.1e"),
+ "update_s": AverageMeter("updt_s", ":.3f"),
+ "dataloading_s": AverageMeter("data_s", ":.3f"),
+ }
+
+ train_tracker = MetricsTracker(
+ cfg.batch_size, dataset.num_frames, dataset.num_episodes, train_metrics, initial_step=step
+ )
+
+ logging.info("Start offline training on a fixed dataset")
+ for _ in range(step, cfg.steps):
+ start_time = time.perf_counter()
+ batch = next(dl_iter)
+ train_tracker.dataloading_s = time.perf_counter() - start_time
+
+ for key in batch:
+ if isinstance(batch[key], torch.Tensor):
+ batch[key] = batch[key].to(device, non_blocking=device.type == "cuda")
+
+ train_tracker, output_dict = update_policy(
+ train_tracker,
+ policy,
+ batch,
+ optimizer,
+ cfg.optimizer.grad_clip_norm,
+ grad_scaler=grad_scaler,
+ lr_scheduler=lr_scheduler,
+ use_amp=cfg.policy.use_amp,
+ )
+
+ # Note: eval and checkpoint happens *after* the `step`th training update has completed, so we
+ # increment `step` here.
+ step += 1
+ train_tracker.step()
+ is_log_step = cfg.log_freq > 0 and step % cfg.log_freq == 0
+ is_saving_step = step % cfg.save_freq == 0 or step == cfg.steps
+ is_eval_step = cfg.eval_freq > 0 and step % cfg.eval_freq == 0
+
+ if is_log_step:
+ logging.info(train_tracker)
+ if wandb_logger:
+ wandb_log_dict = train_tracker.to_dict()
+ if output_dict:
+ wandb_log_dict.update(output_dict)
+ wandb_logger.log_dict(wandb_log_dict, step)
+ train_tracker.reset_averages()
+
+ if cfg.save_checkpoint and is_saving_step:
+ logging.info(f"Checkpoint policy after step {step}")
+ checkpoint_dir = get_step_checkpoint_dir(cfg.output_dir, cfg.steps, step)
+ save_checkpoint(checkpoint_dir, step, cfg, policy, optimizer, lr_scheduler)
+ update_last_checkpoint(checkpoint_dir)
+ if wandb_logger:
+ wandb_logger.log_policy(checkpoint_dir)
+
+ if cfg.env and is_eval_step:
+ step_id = get_step_identifier(step, cfg.steps)
+ logging.info(f"Eval policy at step {step}")
+ with (
+ torch.no_grad(),
+ torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext(),
+ ):
+ eval_info = eval_policy(
+ eval_env,
+ policy,
+ cfg.eval.n_episodes,
+ videos_dir=cfg.output_dir / "eval" / f"videos_step_{step_id}",
+ max_episodes_rendered=4,
+ start_seed=cfg.seed,
+ )
+
+ eval_metrics = {
+ "avg_sum_reward": AverageMeter("∑rwrd", ":.3f"),
+ "pc_success": AverageMeter("success", ":.1f"),
+ "eval_s": AverageMeter("eval_s", ":.3f"),
+ }
+ eval_tracker = MetricsTracker(
+ cfg.batch_size, dataset.num_frames, dataset.num_episodes, eval_metrics, initial_step=step
+ )
+ eval_tracker.eval_s = eval_info["aggregated"].pop("eval_s")
+ eval_tracker.avg_sum_reward = eval_info["aggregated"].pop("avg_sum_reward")
+ eval_tracker.pc_success = eval_info["aggregated"].pop("pc_success")
+ logging.info(eval_tracker)
+ if wandb_logger:
+ wandb_log_dict = {**eval_tracker.to_dict(), **eval_info}
+ wandb_logger.log_dict(wandb_log_dict, step, mode="eval")
+ wandb_logger.log_video(eval_info["video_paths"][0], step, mode="eval")
+
+ if eval_env:
+ eval_env.close()
+ logging.info("End of training")
+
+ if cfg.policy.push_to_hub:
+ policy.push_model_to_hub(cfg)
+
+
+if __name__ == "__main__":
+ init_logging()
+ train()
diff --git a/lerobot/scripts/visualize_dataset.py b/src/lerobot/scripts/visualize_dataset.py
similarity index 91%
rename from lerobot/scripts/visualize_dataset.py
rename to src/lerobot/scripts/visualize_dataset.py
index ca17640723..51ead0dd10 100644
--- a/lerobot/scripts/visualize_dataset.py
+++ b/src/lerobot/scripts/visualize_dataset.py
@@ -29,14 +29,14 @@
- Visualize data stored on a local machine:
```
-local$ python lerobot/scripts/visualize_dataset.py \
+local$ python -m lerobot.scripts.visualize_dataset \
--repo-id lerobot/pusht \
--episode-index 0
```
- Visualize data stored on a distant machine with a local viewer:
```
-distant$ python lerobot/scripts/visualize_dataset.py \
+distant$ python -m lerobot.scripts.visualize_dataset \
--repo-id lerobot/pusht \
--episode-index 0 \
--save 1 \
@@ -50,7 +50,7 @@
(You need to forward the websocket port to the distant machine, with
`ssh -L 9087:localhost:9087 username@remote-host`)
```
-distant$ python lerobot/scripts/visualize_dataset.py \
+distant$ python -m lerobot.scripts.visualize_dataset \
--repo-id lerobot/pusht \
--episode-index 0 \
--mode distant \
@@ -65,8 +65,8 @@
import gc
import logging
import time
+from collections.abc import Iterator
from pathlib import Path
-from typing import Iterator
import numpy as np
import rerun as rr
@@ -74,7 +74,7 @@
import torch.utils.data
import tqdm
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
class EpisodeSampler(torch.utils.data.Sampler):
@@ -111,9 +111,9 @@ def visualize_dataset(
output_dir: Path | None = None,
) -> Path | None:
if save:
- assert (
- output_dir is not None
- ), "Set an output directory where to write .rrd files with `--output-dir path/to/directory`."
+ assert output_dir is not None, (
+ "Set an output directory where to write .rrd files with `--output-dir path/to/directory`."
+ )
repo_id = dataset.repo_id
@@ -207,12 +207,6 @@ def main():
required=True,
help="Episode to visualize.",
)
- parser.add_argument(
- "--local-files-only",
- type=int,
- default=0,
- help="Use local files only. By default, this script will try to fetch the dataset from the hub if it exists.",
- )
parser.add_argument(
"--root",
type=Path,
@@ -271,14 +265,25 @@ def main():
),
)
+ parser.add_argument(
+ "--tolerance-s",
+ type=float,
+ default=1e-4,
+ help=(
+ "Tolerance in seconds used to ensure data timestamps respect the dataset fps value"
+ "This is argument passed to the constructor of LeRobotDataset and maps to its tolerance_s constructor argument"
+ "If not given, defaults to 1e-4."
+ ),
+ )
+
args = parser.parse_args()
kwargs = vars(args)
repo_id = kwargs.pop("repo_id")
root = kwargs.pop("root")
- local_files_only = kwargs.pop("local_files_only")
+ tolerance_s = kwargs.pop("tolerance_s")
logging.info("Loading dataset")
- dataset = LeRobotDataset(repo_id, root=root, local_files_only=local_files_only)
+ dataset = LeRobotDataset(repo_id, root=root, tolerance_s=tolerance_s)
visualize_dataset(dataset, **vars(args))
diff --git a/lerobot/scripts/visualize_dataset_html.py b/src/lerobot/scripts/visualize_dataset_html.py
similarity index 88%
rename from lerobot/scripts/visualize_dataset_html.py
rename to src/lerobot/scripts/visualize_dataset_html.py
index cc3f39308d..a722da6036 100644
--- a/lerobot/scripts/visualize_dataset_html.py
+++ b/src/lerobot/scripts/visualize_dataset_html.py
@@ -29,7 +29,7 @@
- Visualize data stored on a local machine:
```bash
-local$ python lerobot/scripts/visualize_dataset_html.py \
+local$ python -m lerobot.scripts.visualize_dataset_html \
--repo-id lerobot/pusht
local$ open http://localhost:9090
@@ -37,7 +37,7 @@
- Visualize data stored on a distant machine with a local viewer:
```bash
-distant$ python lerobot/scripts/visualize_dataset_html.py \
+distant$ python -m lerobot.scripts.visualize_dataset_html \
--repo-id lerobot/pusht
local$ ssh -L 9090:localhost:9090 distant # create a ssh tunnel
@@ -46,7 +46,7 @@
- Select episodes to visualize:
```bash
-python lerobot/scripts/visualize_dataset_html.py \
+python -m lerobot.scripts.visualize_dataset_html \
--repo-id lerobot/pusht \
--episodes 7 3 5 1 4
```
@@ -68,9 +68,9 @@
from flask import Flask, redirect, render_template, request, url_for
from lerobot import available_datasets
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.common.datasets.utils import IterableNamespace
-from lerobot.common.utils.utils import init_logging
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.utils import IterableNamespace
+from lerobot.utils.utils import init_logging
def run_server(
@@ -150,7 +150,7 @@ def show_episode(dataset_namespace, dataset_name, episode_id, dataset=dataset, e
400,
)
dataset_version = (
- dataset.meta._version if isinstance(dataset, LeRobotDataset) else dataset.codebase_version
+ str(dataset.meta._version) if isinstance(dataset, LeRobotDataset) else dataset.codebase_version
)
match = re.search(r"v(\d+)\.", dataset_version)
if match:
@@ -158,7 +158,7 @@ def show_episode(dataset_namespace, dataset_name, episode_id, dataset=dataset, e
if major_version < 2:
return "Make sure to convert your LeRobotDataset to v2 & above."
- episode_data_csv_str, columns = get_episode_data(dataset, episode_id)
+ episode_data_csv_str, columns, ignored_columns = get_episode_data(dataset, episode_id)
dataset_info = {
"repo_id": f"{dataset_namespace}/{dataset_name}",
"num_samples": dataset.num_frames
@@ -174,7 +174,10 @@ def show_episode(dataset_namespace, dataset_name, episode_id, dataset=dataset, e
dataset.meta.get_video_file_path(episode_id, key) for key in dataset.meta.video_keys
]
videos_info = [
- {"url": url_for("static", filename=video_path), "filename": video_path.parent.name}
+ {
+ "url": url_for("static", filename=str(video_path).replace("\\", "/")),
+ "filename": video_path.parent.name,
+ }
for video_path in video_paths
]
tasks = dataset.meta.episodes[episode_id]["tasks"]
@@ -194,7 +197,7 @@ def show_episode(dataset_namespace, dataset_name, episode_id, dataset=dataset, e
]
response = requests.get(
- f"https://huggingface.co/datasets/{repo_id}/resolve/main/meta/episodes.jsonl"
+ f"https://huggingface.co/datasets/{repo_id}/resolve/main/meta/episodes.jsonl", timeout=5
)
response.raise_for_status()
# Split into lines and parse each line as JSON
@@ -218,6 +221,7 @@ def show_episode(dataset_namespace, dataset_name, episode_id, dataset=dataset, e
videos_info=videos_info,
episode_data_csv_str=episode_data_csv_str,
columns=columns,
+ ignored_columns=ignored_columns,
)
app.run(host=host, port=port)
@@ -233,9 +237,17 @@ def get_episode_data(dataset: LeRobotDataset | IterableNamespace, episode_index)
This file will be loaded by Dygraph javascript to plot data in real time."""
columns = []
- selected_columns = [col for col, ft in dataset.features.items() if ft["dtype"] == "float32"]
+ selected_columns = [col for col, ft in dataset.features.items() if ft["dtype"] in ["float32", "int32"]]
selected_columns.remove("timestamp")
+ ignored_columns = []
+ for column_name in selected_columns:
+ shape = dataset.features[column_name]["shape"]
+ shape_dim = len(shape)
+ if shape_dim > 1:
+ selected_columns.remove(column_name)
+ ignored_columns.append(column_name)
+
# init header of csv with state and action names
header = ["timestamp"]
@@ -245,16 +257,17 @@ def get_episode_data(dataset: LeRobotDataset | IterableNamespace, episode_index)
if isinstance(dataset, LeRobotDataset)
else dataset.features[column_name].shape[0]
)
- header += [f"{column_name}_{i}" for i in range(dim_state)]
if "names" in dataset.features[column_name] and dataset.features[column_name]["names"]:
column_names = dataset.features[column_name]["names"]
while not isinstance(column_names, list):
column_names = list(column_names.values())[0]
else:
- column_names = [f"motor_{i}" for i in range(dim_state)]
+ column_names = [f"{column_name}_{i}" for i in range(dim_state)]
columns.append({"key": column_name, "value": column_names})
+ header += column_names
+
selected_columns.insert(0, "timestamp")
if isinstance(dataset, LeRobotDataset):
@@ -290,7 +303,7 @@ def get_episode_data(dataset: LeRobotDataset | IterableNamespace, episode_index)
csv_writer.writerows(rows)
csv_string = csv_buffer.getvalue()
- return csv_string, columns
+ return csv_string, columns, ignored_columns
def get_episode_video_paths(dataset: LeRobotDataset, ep_index: int) -> list[str]:
@@ -317,7 +330,9 @@ def get_episode_language_instruction(dataset: LeRobotDataset, ep_index: int) ->
def get_dataset_info(repo_id: str) -> IterableNamespace:
- response = requests.get(f"https://huggingface.co/datasets/{repo_id}/resolve/main/meta/info.json")
+ response = requests.get(
+ f"https://huggingface.co/datasets/{repo_id}/resolve/main/meta/info.json", timeout=5
+ )
response.raise_for_status() # Raises an HTTPError for bad responses
dataset_info = response.json()
dataset_info["repo_id"] = repo_id
@@ -364,12 +379,12 @@ def visualize_dataset_html(
template_folder=template_dir,
)
else:
- # Create a simlink from the dataset video folder containg mp4 files to the output directory
+ # Create a symlink from the dataset video folder containing mp4 files to the output directory
# so that the http server can get access to the mp4 files.
if isinstance(dataset, LeRobotDataset):
ln_videos_dir = static_dir / "videos"
if not ln_videos_dir.exists():
- ln_videos_dir.symlink_to((dataset.root / "videos").resolve())
+ ln_videos_dir.symlink_to((dataset.root / "videos").resolve().as_posix())
if serve:
run_server(dataset, episodes, host, port, static_dir, template_dir)
@@ -384,12 +399,6 @@ def main():
default=None,
help="Name of hugging face repositery containing a LeRobotDataset dataset (e.g. `lerobot/pusht` for https://huggingface.co/datasets/lerobot/pusht).",
)
- parser.add_argument(
- "--local-files-only",
- type=int,
- default=0,
- help="Use local files only. By default, this script will try to fetch the dataset from the hub if it exists.",
- )
parser.add_argument(
"--root",
type=Path,
@@ -440,17 +449,28 @@ def main():
help="Delete the output directory if it exists already.",
)
+ parser.add_argument(
+ "--tolerance-s",
+ type=float,
+ default=1e-4,
+ help=(
+ "Tolerance in seconds used to ensure data timestamps respect the dataset fps value"
+ "This is argument passed to the constructor of LeRobotDataset and maps to its tolerance_s constructor argument"
+ "If not given, defaults to 1e-4."
+ ),
+ )
+
args = parser.parse_args()
kwargs = vars(args)
repo_id = kwargs.pop("repo_id")
load_from_hf_hub = kwargs.pop("load_from_hf_hub")
root = kwargs.pop("root")
- local_files_only = kwargs.pop("local_files_only")
+ tolerance_s = kwargs.pop("tolerance_s")
dataset = None
if repo_id:
dataset = (
- LeRobotDataset(repo_id, root=root, local_files_only=local_files_only)
+ LeRobotDataset(repo_id, root=root, tolerance_s=tolerance_s)
if not load_from_hf_hub
else get_dataset_info(repo_id)
)
diff --git a/src/lerobot/scripts/visualize_image_transforms.py b/src/lerobot/scripts/visualize_image_transforms.py
new file mode 100644
index 0000000000..14caf89df0
--- /dev/null
+++ b/src/lerobot/scripts/visualize_image_transforms.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Visualize effects of image transforms for a given configuration.
+
+This script generates examples of transformed images as they are output by the LeRobot dataset.
+Additionally, each individual transform can be visualized separately, as well as examples of combined transforms.
+
+Example:
+```bash
+python -m lerobot.scripts.visualize_image_transforms \
+ --repo_id=lerobot/pusht \
+ --episodes='[0]' \
+ --image_transforms.enable=True
+```
+"""
+
+import logging
+from copy import deepcopy
+from dataclasses import replace
+from pathlib import Path
+
+import draccus
+from torchvision.transforms import ToPILImage
+
+from lerobot.configs.default import DatasetConfig
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.transforms import (
+ ImageTransforms,
+ ImageTransformsConfig,
+ make_transform_from_config,
+)
+
+OUTPUT_DIR = Path("outputs/image_transforms")
+to_pil = ToPILImage()
+
+
+def save_all_transforms(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples):
+ output_dir_all = output_dir / "all"
+ output_dir_all.mkdir(parents=True, exist_ok=True)
+
+ tfs = ImageTransforms(cfg)
+ for i in range(1, n_examples + 1):
+ transformed_frame = tfs(original_frame)
+ to_pil(transformed_frame).save(output_dir_all / f"{i}.png", quality=100)
+
+ print("Combined transforms examples saved to:")
+ print(f" {output_dir_all}")
+
+
+def save_each_transform(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples):
+ if not cfg.enable:
+ logging.warning(
+ "No single transforms will be saved, because `image_transforms.enable=False`. To enable, set `enable` to True in `ImageTransformsConfig` or in the command line with `--image_transforms.enable=True`."
+ )
+ return
+
+ print("Individual transforms examples saved to:")
+ for tf_name, tf_cfg in cfg.tfs.items():
+ # Apply a few transformations with random values sampled in the min_max range
+ output_dir_single = output_dir / tf_name
+ output_dir_single.mkdir(parents=True, exist_ok=True)
+
+ tf = make_transform_from_config(tf_cfg)
+ for i in range(1, n_examples + 1):
+ transformed_frame = tf(original_frame)
+ to_pil(transformed_frame).save(output_dir_single / f"{i}.png", quality=100)
+
+ # Apply min, max, average transformations
+ tf_cfg_kwgs_min = deepcopy(tf_cfg.kwargs)
+ tf_cfg_kwgs_max = deepcopy(tf_cfg.kwargs)
+ tf_cfg_kwgs_avg = deepcopy(tf_cfg.kwargs)
+
+ for key, (min_, max_) in tf_cfg.kwargs.items():
+ avg = (min_ + max_) / 2
+ tf_cfg_kwgs_min[key] = [min_, min_]
+ tf_cfg_kwgs_max[key] = [max_, max_]
+ tf_cfg_kwgs_avg[key] = [avg, avg]
+
+ tf_min = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_min}))
+ tf_max = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_max}))
+ tf_avg = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_avg}))
+
+ tf_frame_min = tf_min(original_frame)
+ tf_frame_max = tf_max(original_frame)
+ tf_frame_avg = tf_avg(original_frame)
+
+ to_pil(tf_frame_min).save(output_dir_single / "min.png", quality=100)
+ to_pil(tf_frame_max).save(output_dir_single / "max.png", quality=100)
+ to_pil(tf_frame_avg).save(output_dir_single / "mean.png", quality=100)
+
+ print(f" {output_dir_single}")
+
+
+@draccus.wrap()
+def visualize_image_transforms(cfg: DatasetConfig, output_dir: Path = OUTPUT_DIR, n_examples: int = 5):
+ dataset = LeRobotDataset(
+ repo_id=cfg.repo_id,
+ episodes=cfg.episodes,
+ revision=cfg.revision,
+ video_backend=cfg.video_backend,
+ )
+
+ output_dir = output_dir / cfg.repo_id.split("/")[-1]
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ # Get 1st frame from 1st camera of 1st episode
+ original_frame = dataset[0][dataset.meta.camera_keys[0]]
+ to_pil(original_frame).save(output_dir / "original_frame.png", quality=100)
+ print("\nOriginal frame saved to:")
+ print(f" {output_dir / 'original_frame.png'}.")
+
+ save_all_transforms(cfg.image_transforms, original_frame, output_dir, n_examples)
+ save_each_transform(cfg.image_transforms, original_frame, output_dir, n_examples)
+
+
+if __name__ == "__main__":
+ visualize_image_transforms()
diff --git a/src/lerobot/setup_motors.py b/src/lerobot/setup_motors.py
new file mode 100644
index 0000000000..c54582a1d1
--- /dev/null
+++ b/src/lerobot/setup_motors.py
@@ -0,0 +1,84 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Helper to set motor ids and baudrate.
+
+Example:
+
+```shell
+python -m lerobot.setup_motors \
+ --teleop.type=so100_leader \
+ --teleop.port=/dev/tty.usbmodem575E0031751
+```
+"""
+
+from dataclasses import dataclass
+
+import draccus
+
+from lerobot.robots import ( # noqa: F401
+ RobotConfig,
+ koch_follower,
+ lekiwi,
+ make_robot_from_config,
+ so100_follower,
+ so101_follower,
+)
+from lerobot.teleoperators import ( # noqa: F401
+ TeleoperatorConfig,
+ koch_leader,
+ make_teleoperator_from_config,
+ so100_leader,
+ so101_leader,
+)
+
+COMPATIBLE_DEVICES = [
+ "koch_follower",
+ "koch_leader",
+ "so100_follower",
+ "so100_leader",
+ "so101_follower",
+ "so101_leader",
+ "lekiwi",
+]
+
+
+@dataclass
+class SetupConfig:
+ teleop: TeleoperatorConfig | None = None
+ robot: RobotConfig | None = None
+
+ def __post_init__(self):
+ if bool(self.teleop) == bool(self.robot):
+ raise ValueError("Choose either a teleop or a robot.")
+
+ self.device = self.robot if self.robot else self.teleop
+
+
+@draccus.wrap()
+def setup_motors(cfg: SetupConfig):
+ if cfg.device.type not in COMPATIBLE_DEVICES:
+ raise NotImplementedError
+
+ if isinstance(cfg.device, RobotConfig):
+ device = make_robot_from_config(cfg.device)
+ else:
+ device = make_teleoperator_from_config(cfg.device)
+
+ device.setup_motors()
+
+
+if __name__ == "__main__":
+ setup_motors()
diff --git a/src/lerobot/teleoperate.py b/src/lerobot/teleoperate.py
new file mode 100644
index 0000000000..9836f1393c
--- /dev/null
+++ b/src/lerobot/teleoperate.py
@@ -0,0 +1,157 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Simple script to control a robot from teleoperation.
+
+Example:
+
+```shell
+python -m lerobot.teleoperate \
+ --robot.type=so101_follower \
+ --robot.port=/dev/tty.usbmodem58760431541 \
+ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
+ --robot.id=black \
+ --teleop.type=so101_leader \
+ --teleop.port=/dev/tty.usbmodem58760431551 \
+ --teleop.id=blue \
+ --display_data=true
+```
+
+Example teleoperation with bimanual so100:
+
+```shell
+python -m lerobot.teleoperate \
+ --robot.type=bi_so100_follower \
+ --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \
+ --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \
+ --robot.id=bimanual_follower \
+ --robot.cameras='{
+ left: {"type": "opencv", "index_or_path": 0, "width": 1920, "height": 1080, "fps": 30},
+ top: {"type": "opencv", "index_or_path": 1, "width": 1920, "height": 1080, "fps": 30},
+ right: {"type": "opencv", "index_or_path": 2, "width": 1920, "height": 1080, "fps": 30}
+ }' \
+ --teleop.type=bi_so100_leader \
+ --teleop.left_arm_port=/dev/tty.usbmodem5A460828611 \
+ --teleop.right_arm_port=/dev/tty.usbmodem5A460826981 \
+ --teleop.id=bimanual_leader \
+ --display_data=true
+```
+
+"""
+
+import logging
+import time
+from dataclasses import asdict, dataclass
+from pprint import pformat
+
+import draccus
+import rerun as rr
+
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401
+from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401
+from lerobot.robots import ( # noqa: F401
+ Robot,
+ RobotConfig,
+ bi_so100_follower,
+ hope_jr,
+ koch_follower,
+ make_robot_from_config,
+ so100_follower,
+ so101_follower,
+)
+from lerobot.teleoperators import ( # noqa: F401
+ Teleoperator,
+ TeleoperatorConfig,
+ bi_so100_leader,
+ gamepad,
+ homunculus,
+ koch_leader,
+ make_teleoperator_from_config,
+ so100_leader,
+ so101_leader,
+)
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.utils import init_logging, move_cursor_up
+from lerobot.utils.visualization_utils import _init_rerun, log_rerun_data
+
+
+@dataclass
+class TeleoperateConfig:
+ # TODO: pepijn, steven: if more robots require multiple teleoperators (like lekiwi) it's good to make this possible in teleop.py and record.py with List[Teleoperator]
+ teleop: TeleoperatorConfig
+ robot: RobotConfig
+ # Limit the maximum frames per second.
+ fps: int = 60
+ teleop_time_s: float | None = None
+ # Display all cameras on screen
+ display_data: bool = False
+
+
+def teleop_loop(
+ teleop: Teleoperator, robot: Robot, fps: int, display_data: bool = False, duration: float | None = None
+):
+ display_len = max(len(key) for key in robot.action_features)
+ start = time.perf_counter()
+ while True:
+ loop_start = time.perf_counter()
+ action = teleop.get_action()
+ if display_data:
+ observation = robot.get_observation()
+ log_rerun_data(observation, action)
+
+ robot.send_action(action)
+ dt_s = time.perf_counter() - loop_start
+ busy_wait(1 / fps - dt_s)
+
+ loop_s = time.perf_counter() - loop_start
+
+ print("\n" + "-" * (display_len + 10))
+ print(f"{'NAME':<{display_len}} | {'NORM':>7}")
+ for motor, value in action.items():
+ print(f"{motor:<{display_len}} | {value:>7.2f}")
+ print(f"\ntime: {loop_s * 1e3:.2f}ms ({1 / loop_s:.0f} Hz)")
+
+ if duration is not None and time.perf_counter() - start >= duration:
+ return
+
+ move_cursor_up(len(action) + 5)
+
+
+@draccus.wrap()
+def teleoperate(cfg: TeleoperateConfig):
+ init_logging()
+ logging.info(pformat(asdict(cfg)))
+ if cfg.display_data:
+ _init_rerun(session_name="teleoperation")
+
+ teleop = make_teleoperator_from_config(cfg.teleop)
+ robot = make_robot_from_config(cfg.robot)
+
+ teleop.connect()
+ robot.connect()
+
+ try:
+ teleop_loop(teleop, robot, cfg.fps, display_data=cfg.display_data, duration=cfg.teleop_time_s)
+ except KeyboardInterrupt:
+ pass
+ finally:
+ if cfg.display_data:
+ rr.rerun_shutdown()
+ teleop.disconnect()
+ robot.disconnect()
+
+
+if __name__ == "__main__":
+ teleoperate()
diff --git a/src/lerobot/teleoperators/__init__.py b/src/lerobot/teleoperators/__init__.py
new file mode 100644
index 0000000000..56f48af7e8
--- /dev/null
+++ b/src/lerobot/teleoperators/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config import TeleoperatorConfig
+from .teleoperator import Teleoperator
+from .utils import make_teleoperator_from_config
diff --git a/src/lerobot/teleoperators/bi_so100_leader/__init__.py b/src/lerobot/teleoperators/bi_so100_leader/__init__.py
new file mode 100644
index 0000000000..34313a61e6
--- /dev/null
+++ b/src/lerobot/teleoperators/bi_so100_leader/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .bi_so100_leader import BiSO100Leader
+from .config_bi_so100_leader import BiSO100LeaderConfig
diff --git a/src/lerobot/teleoperators/bi_so100_leader/bi_so100_leader.py b/src/lerobot/teleoperators/bi_so100_leader/bi_so100_leader.py
new file mode 100644
index 0000000000..7696696557
--- /dev/null
+++ b/src/lerobot/teleoperators/bi_so100_leader/bi_so100_leader.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from functools import cached_property
+
+from lerobot.teleoperators.so100_leader.config_so100_leader import SO100LeaderConfig
+from lerobot.teleoperators.so100_leader.so100_leader import SO100Leader
+
+from ..teleoperator import Teleoperator
+from .config_bi_so100_leader import BiSO100LeaderConfig
+
+logger = logging.getLogger(__name__)
+
+
+class BiSO100Leader(Teleoperator):
+ """
+ [Bimanual SO-100 Leader Arms](https://github.com/TheRobotStudio/SO-ARM100) designed by TheRobotStudio
+    This bimanual leader arm can also be adapted to SO-101 leader arms: replace the SO100Leader class with SO101Leader and SO100LeaderConfig with SO101LeaderConfig (see the sketch in the comment below).
+ """
+
+ config_class = BiSO100LeaderConfig
+ name = "bi_so100_leader"
+
+ def __init__(self, config: BiSO100LeaderConfig):
+ super().__init__(config)
+ self.config = config
+
+ left_arm_config = SO100LeaderConfig(
+ id=f"{config.id}_left" if config.id else None,
+ calibration_dir=config.calibration_dir,
+ port=config.left_arm_port,
+ )
+
+ right_arm_config = SO100LeaderConfig(
+ id=f"{config.id}_right" if config.id else None,
+ calibration_dir=config.calibration_dir,
+ port=config.right_arm_port,
+ )
+
+ self.left_arm = SO100Leader(left_arm_config)
+ self.right_arm = SO100Leader(right_arm_config)
+
+ @cached_property
+ def action_features(self) -> dict[str, type]:
+ return {f"left_{motor}.pos": float for motor in self.left_arm.bus.motors} | {
+ f"right_{motor}.pos": float for motor in self.right_arm.bus.motors
+ }
+
+ @cached_property
+ def feedback_features(self) -> dict[str, type]:
+ return {}
+
+ @property
+ def is_connected(self) -> bool:
+ return self.left_arm.is_connected and self.right_arm.is_connected
+
+ def connect(self, calibrate: bool = True) -> None:
+ self.left_arm.connect(calibrate)
+ self.right_arm.connect(calibrate)
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.left_arm.is_calibrated and self.right_arm.is_calibrated
+
+ def calibrate(self) -> None:
+ self.left_arm.calibrate()
+ self.right_arm.calibrate()
+
+ def configure(self) -> None:
+ self.left_arm.configure()
+ self.right_arm.configure()
+
+ def setup_motors(self) -> None:
+ self.left_arm.setup_motors()
+ self.right_arm.setup_motors()
+
+ def get_action(self) -> dict[str, float]:
+ action_dict = {}
+
+ # Add "left_" prefix
+ left_action = self.left_arm.get_action()
+ action_dict.update({f"left_{key}": value for key, value in left_action.items()})
+
+ # Add "right_" prefix
+ right_action = self.right_arm.get_action()
+ action_dict.update({f"right_{key}": value for key, value in right_action.items()})
+
+ return action_dict
+
+ def send_feedback(self, feedback: dict[str, float]) -> None:
+ # Remove "left_" prefix
+ left_feedback = {
+ key.removeprefix("left_"): value for key, value in feedback.items() if key.startswith("left_")
+ }
+ # Remove "right_" prefix
+ right_feedback = {
+ key.removeprefix("right_"): value for key, value in feedback.items() if key.startswith("right_")
+ }
+
+ if left_feedback:
+ self.left_arm.send_feedback(left_feedback)
+ if right_feedback:
+ self.right_arm.send_feedback(right_feedback)
+
+ def disconnect(self) -> None:
+ self.left_arm.disconnect()
+ self.right_arm.disconnect()
diff --git a/src/lerobot/teleoperators/bi_so100_leader/config_bi_so100_leader.py b/src/lerobot/teleoperators/bi_so100_leader/config_bi_so100_leader.py
new file mode 100644
index 0000000000..117e099131
--- /dev/null
+++ b/src/lerobot/teleoperators/bi_so100_leader/config_bi_so100_leader.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("bi_so100_leader")
+@dataclass
+class BiSO100LeaderConfig(TeleoperatorConfig):
+ left_arm_port: str
+ right_arm_port: str
diff --git a/src/lerobot/teleoperators/config.py b/src/lerobot/teleoperators/config.py
new file mode 100644
index 0000000000..1b42b4edbe
--- /dev/null
+++ b/src/lerobot/teleoperators/config.py
@@ -0,0 +1,31 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+from dataclasses import dataclass
+from pathlib import Path
+
+import draccus
+
+
+@dataclass(kw_only=True)
+class TeleoperatorConfig(draccus.ChoiceRegistry, abc.ABC):
+ # Allows to distinguish between different teleoperators of the same type
+ id: str | None = None
+ # Directory to store calibration file
+ calibration_dir: Path | None = None
+
+ @property
+ def type(self) -> str:
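+        # Return the name under which the concrete config class was registered via register_subclass (e.g. "bi_so100_leader")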
+ return self.get_choice_name(self.__class__)
diff --git a/src/lerobot/teleoperators/gamepad/__init__.py b/src/lerobot/teleoperators/gamepad/__init__.py
new file mode 100644
index 0000000000..6f9f7fbd91
--- /dev/null
+++ b/src/lerobot/teleoperators/gamepad/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .configuration_gamepad import GamepadTeleopConfig
+from .teleop_gamepad import GamepadTeleop
diff --git a/src/lerobot/teleoperators/gamepad/configuration_gamepad.py b/src/lerobot/teleoperators/gamepad/configuration_gamepad.py
new file mode 100644
index 0000000000..b3a565c072
--- /dev/null
+++ b/src/lerobot/teleoperators/gamepad/configuration_gamepad.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("gamepad")
+@dataclass
+class GamepadTeleopConfig(TeleoperatorConfig):
+ use_gripper: bool = True
diff --git a/src/lerobot/teleoperators/gamepad/gamepad_utils.py b/src/lerobot/teleoperators/gamepad/gamepad_utils.py
new file mode 100644
index 0000000000..9b62dc666e
--- /dev/null
+++ b/src/lerobot/teleoperators/gamepad/gamepad_utils.py
@@ -0,0 +1,480 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+
+class InputController:
+ """Base class for input controllers that generate motion deltas."""
+
+ def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0):
+ """
+ Initialize the controller.
+
+ Args:
+ x_step_size: Base movement step size in meters
+ y_step_size: Base movement step size in meters
+ z_step_size: Base movement step size in meters
+ """
+ self.x_step_size = x_step_size
+ self.y_step_size = y_step_size
+ self.z_step_size = z_step_size
+ self.running = True
+        self.episode_end_status = None  # None, "success", "failure", or "rerecord_episode"
+ self.intervention_flag = False
+ self.open_gripper_command = False
+ self.close_gripper_command = False
+
+ def start(self):
+ """Start the controller and initialize resources."""
+ pass
+
+ def stop(self):
+ """Stop the controller and release resources."""
+ pass
+
+ def get_deltas(self):
+ """Get the current movement deltas (dx, dy, dz) in meters."""
+ return 0.0, 0.0, 0.0
+
+ def should_quit(self):
+ """Return True if the user has requested to quit."""
+ return not self.running
+
+ def update(self):
+ """Update controller state - call this once per frame."""
+ pass
+
+ def __enter__(self):
+ """Support for use in 'with' statements."""
+ self.start()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Ensure resources are released when exiting 'with' block."""
+ self.stop()
+
+ def get_episode_end_status(self):
+ """
+ Get the current episode end status.
+
+ Returns:
+ None if episode should continue, "success" or "failure" otherwise
+ """
+ status = self.episode_end_status
+ self.episode_end_status = None # Reset after reading
+ return status
+
+ def should_intervene(self):
+ """Return True if intervention flag was set."""
+ return self.intervention_flag
+
+ def gripper_command(self):
+ """Return the current gripper command."""
+ if self.open_gripper_command == self.close_gripper_command:
+ return "stay"
+ elif self.open_gripper_command:
+ return "open"
+ elif self.close_gripper_command:
+ return "close"
+
+
+class KeyboardController(InputController):
+ """Generate motion deltas from keyboard input."""
+
+ def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0):
+ super().__init__(x_step_size, y_step_size, z_step_size)
+ self.key_states = {
+ "forward_x": False,
+ "backward_x": False,
+ "forward_y": False,
+ "backward_y": False,
+ "forward_z": False,
+ "backward_z": False,
+ "quit": False,
+ "success": False,
+ "failure": False,
+ }
+ self.listener = None
+
+ def start(self):
+ """Start the keyboard listener."""
+ from pynput import keyboard
+
+ def on_press(key):
+ try:
+ if key == keyboard.Key.up:
+ self.key_states["forward_x"] = True
+ elif key == keyboard.Key.down:
+ self.key_states["backward_x"] = True
+ elif key == keyboard.Key.left:
+ self.key_states["forward_y"] = True
+ elif key == keyboard.Key.right:
+ self.key_states["backward_y"] = True
+ elif key == keyboard.Key.shift:
+ self.key_states["backward_z"] = True
+ elif key == keyboard.Key.shift_r:
+ self.key_states["forward_z"] = True
+ elif key == keyboard.Key.esc:
+ self.key_states["quit"] = True
+ self.running = False
+ return False
+ elif key == keyboard.Key.enter:
+ self.key_states["success"] = True
+ self.episode_end_status = "success"
+ elif key == keyboard.Key.backspace:
+ self.key_states["failure"] = True
+ self.episode_end_status = "failure"
+ except AttributeError:
+ pass
+
+ def on_release(key):
+ try:
+ if key == keyboard.Key.up:
+ self.key_states["forward_x"] = False
+ elif key == keyboard.Key.down:
+ self.key_states["backward_x"] = False
+ elif key == keyboard.Key.left:
+ self.key_states["forward_y"] = False
+ elif key == keyboard.Key.right:
+ self.key_states["backward_y"] = False
+ elif key == keyboard.Key.shift:
+ self.key_states["backward_z"] = False
+ elif key == keyboard.Key.shift_r:
+ self.key_states["forward_z"] = False
+ elif key == keyboard.Key.enter:
+ self.key_states["success"] = False
+ elif key == keyboard.Key.backspace:
+ self.key_states["failure"] = False
+ except AttributeError:
+ pass
+
+ self.listener = keyboard.Listener(on_press=on_press, on_release=on_release)
+ self.listener.start()
+
+ print("Keyboard controls:")
+ print(" Arrow keys: Move in X-Y plane")
+ print(" Shift and Shift_R: Move in Z axis")
+ print(" Enter: End episode with SUCCESS")
+ print(" Backspace: End episode with FAILURE")
+ print(" ESC: Exit")
+
+ def stop(self):
+ """Stop the keyboard listener."""
+ if self.listener and self.listener.is_alive():
+ self.listener.stop()
+
+ def get_deltas(self):
+ """Get the current movement deltas from keyboard state."""
+ delta_x = delta_y = delta_z = 0.0
+
+ if self.key_states["forward_x"]:
+ delta_x += self.x_step_size
+ if self.key_states["backward_x"]:
+ delta_x -= self.x_step_size
+ if self.key_states["forward_y"]:
+ delta_y += self.y_step_size
+ if self.key_states["backward_y"]:
+ delta_y -= self.y_step_size
+ if self.key_states["forward_z"]:
+ delta_z += self.z_step_size
+ if self.key_states["backward_z"]:
+ delta_z -= self.z_step_size
+
+ return delta_x, delta_y, delta_z
+
+ def should_quit(self):
+ """Return True if ESC was pressed."""
+ return self.key_states["quit"]
+
+ def should_save(self):
+ """Return True if Enter was pressed (save episode)."""
+ return self.key_states["success"] or self.key_states["failure"]
+
+
+class GamepadController(InputController):
+ """Generate motion deltas from gamepad input."""
+
+ def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0, deadzone=0.1):
+ super().__init__(x_step_size, y_step_size, z_step_size)
+ self.deadzone = deadzone
+ self.joystick = None
+ self.intervention_flag = False
+
+ def start(self):
+ """Initialize pygame and the gamepad."""
+ import pygame
+
+ pygame.init()
+ pygame.joystick.init()
+
+ if pygame.joystick.get_count() == 0:
+ logging.error("No gamepad detected. Please connect a gamepad and try again.")
+ self.running = False
+ return
+
+ self.joystick = pygame.joystick.Joystick(0)
+ self.joystick.init()
+ logging.info(f"Initialized gamepad: {self.joystick.get_name()}")
+
+ print("Gamepad controls:")
+ print(" Left analog stick: Move in X-Y plane")
+ print(" Right analog stick (vertical): Move in Z axis")
+ print(" B/Circle button: Exit")
+ print(" Y/Triangle button: End episode with SUCCESS")
+ print(" A/Cross button: End episode with FAILURE")
+ print(" X/Square button: Rerecord episode")
+
+ def stop(self):
+ """Clean up pygame resources."""
+ import pygame
+
+ if pygame.joystick.get_init():
+ if self.joystick:
+ self.joystick.quit()
+ pygame.joystick.quit()
+ pygame.quit()
+
+ def update(self):
+ """Process pygame events to get fresh gamepad readings."""
+ import pygame
+
+ for event in pygame.event.get():
+ if event.type == pygame.JOYBUTTONDOWN:
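+                # Y/Triangle button (3) for success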
+ if event.button == 3:
+ self.episode_end_status = "success"
+ # A button (1) for failure
+ elif event.button == 1:
+ self.episode_end_status = "failure"
+ # X button (0) for rerecord
+ elif event.button == 0:
+ self.episode_end_status = "rerecord_episode"
+
+ # RB button (6) for closing gripper
+ elif event.button == 6:
+ self.close_gripper_command = True
+
+ # LT button (7) for opening gripper
+ elif event.button == 7:
+ self.open_gripper_command = True
+
+ # Reset episode status on button release
+ elif event.type == pygame.JOYBUTTONUP:
+ if event.button in [0, 2, 3]:
+ self.episode_end_status = None
+
+ elif event.button == 6:
+ self.close_gripper_command = False
+
+ elif event.button == 7:
+ self.open_gripper_command = False
+
+ # Check for RB button (typically button 5) for intervention flag
+ if self.joystick.get_button(5):
+ self.intervention_flag = True
+ else:
+ self.intervention_flag = False
+
+ def get_deltas(self):
+ """Get the current movement deltas from gamepad state."""
+ import pygame
+
+ try:
+ # Read joystick axes
+ # Left stick X and Y (typically axes 0 and 1)
+ y_input = self.joystick.get_axis(0) # Left/Right
+ x_input = self.joystick.get_axis(1) # Up/Down (often inverted)
+
+ # Right stick Y (typically axis 3 or 4)
+ z_input = self.joystick.get_axis(3) # Up/Down for Z
+
+ # Apply deadzone to avoid drift
+ x_input = 0 if abs(x_input) < self.deadzone else x_input
+ y_input = 0 if abs(y_input) < self.deadzone else y_input
+ z_input = 0 if abs(z_input) < self.deadzone else z_input
+
+ # Calculate deltas (note: may need to invert axes depending on controller)
+ delta_x = -x_input * self.x_step_size # Forward/backward
+ delta_y = -y_input * self.y_step_size # Left/right
+ delta_z = -z_input * self.z_step_size # Up/down
+
+ return delta_x, delta_y, delta_z
+
+ except pygame.error:
+ logging.error("Error reading gamepad. Is it still connected?")
+ return 0.0, 0.0, 0.0
+
+
+class GamepadControllerHID(InputController):
+ """Generate motion deltas from gamepad input using HIDAPI."""
+
+ def __init__(
+ self,
+ x_step_size=1.0,
+ y_step_size=1.0,
+ z_step_size=1.0,
+ deadzone=0.1,
+ ):
+ """
+ Initialize the HID gamepad controller.
+
+ Args:
+            x_step_size: Base movement step size in meters
+            y_step_size: Base movement step size in meters
+            z_step_size: Base movement step size in meters
+ deadzone: Joystick deadzone to prevent drift
+ """
+ super().__init__(x_step_size, y_step_size, z_step_size)
+ self.deadzone = deadzone
+ self.device = None
+ self.device_info = None
+
+ # Movement values (normalized from -1.0 to 1.0)
+ self.left_x = 0.0
+ self.left_y = 0.0
+ self.right_x = 0.0
+ self.right_y = 0.0
+
+ # Button states
+ self.buttons = {}
+ self.quit_requested = False
+ self.save_requested = False
+
+ def find_device(self):
+ """Look for the gamepad device by vendor and product ID."""
+ import hid
+
+ devices = hid.enumerate()
+ for device in devices:
+            device_name = device.get("product_string") or ""  # product_string may be None for some HID devices
+ if any(controller in device_name for controller in ["Logitech", "Xbox", "PS4", "PS5"]):
+ return device
+
+ logging.error(
+ "No gamepad found, check the connection and the product string in HID to add your gamepad"
+ )
+ return None
+
+ def start(self):
+ """Connect to the gamepad using HIDAPI."""
+ import hid
+
+ self.device_info = self.find_device()
+ if not self.device_info:
+ self.running = False
+ return
+
+ try:
+ logging.info(f"Connecting to gamepad at path: {self.device_info['path']}")
+ self.device = hid.device()
+ self.device.open_path(self.device_info["path"])
+ self.device.set_nonblocking(1)
+
+ manufacturer = self.device.get_manufacturer_string()
+ product = self.device.get_product_string()
+ logging.info(f"Connected to {manufacturer} {product}")
+
+ logging.info("Gamepad controls (HID mode):")
+ logging.info(" Left analog stick: Move in X-Y plane")
+ logging.info(" Right analog stick: Move in Z axis (vertical)")
+ logging.info(" Button 1/B/Circle: Exit")
+ logging.info(" Button 2/A/Cross: End episode with SUCCESS")
+ logging.info(" Button 3/X/Square: End episode with FAILURE")
+
+ except OSError as e:
+ logging.error(f"Error opening gamepad: {e}")
+ logging.error("You might need to run this with sudo/admin privileges on some systems")
+ self.running = False
+
+ def stop(self):
+ """Close the HID device connection."""
+ if self.device:
+ self.device.close()
+ self.device = None
+
+ def update(self):
+ """
+ Read and process the latest gamepad data.
+        Due to an issue with HIDAPI, we need to read the device several times in order to get a stable reading.
+ """
+ for _ in range(10):
+ self._update()
+
+ def _update(self):
+ """Read and process the latest gamepad data."""
+ if not self.device or not self.running:
+ return
+
+ try:
+ # Read data from the gamepad
+ data = self.device.read(64)
+ # Interpret gamepad data - this will vary by controller model
+ # These offsets are for the Logitech RumblePad 2
+ if data and len(data) >= 8:
+ # Normalize joystick values from 0-255 to -1.0-1.0
+ self.left_y = (data[1] - 128) / 128.0
+ self.left_x = (data[2] - 128) / 128.0
+ self.right_x = (data[3] - 128) / 128.0
+ self.right_y = (data[4] - 128) / 128.0
+
+ # Apply deadzone
+ self.left_y = 0 if abs(self.left_y) < self.deadzone else self.left_y
+ self.left_x = 0 if abs(self.left_x) < self.deadzone else self.left_x
+ self.right_x = 0 if abs(self.right_x) < self.deadzone else self.right_x
+ self.right_y = 0 if abs(self.right_y) < self.deadzone else self.right_y
+
+ # Parse button states (byte 5 in the Logitech RumblePad 2)
+ buttons = data[5]
+
+                # If RB is pressed, set the intervention flag
+ self.intervention_flag = data[6] in [2, 6, 10, 14]
+
+ # Check if RT is pressed
+ self.open_gripper_command = data[6] in [8, 10, 12]
+
+ # Check if LT is pressed
+ self.close_gripper_command = data[6] in [4, 6, 12]
+
+ # Check if Y/Triangle button (bit 7) is pressed for saving
+ # Check if X/Square button (bit 5) is pressed for failure
+ # Check if A/Cross button (bit 4) is pressed for rerecording
+ if buttons & 1 << 7:
+ self.episode_end_status = "success"
+ elif buttons & 1 << 5:
+ self.episode_end_status = "failure"
+ elif buttons & 1 << 4:
+ self.episode_end_status = "rerecord_episode"
+ else:
+ self.episode_end_status = None
+
+ except OSError as e:
+ logging.error(f"Error reading from gamepad: {e}")
+
+ def get_deltas(self):
+ """Get the current movement deltas from gamepad state."""
+ # Calculate deltas - invert as needed based on controller orientation
+ delta_x = -self.left_x * self.x_step_size # Forward/backward
+ delta_y = -self.left_y * self.y_step_size # Left/right
+ delta_z = -self.right_y * self.z_step_size # Up/down
+
+ return delta_x, delta_y, delta_z
+
+ def should_quit(self):
+ """Return True if quit button was pressed."""
+ return self.quit_requested
+
+ def should_save(self):
+ """Return True if save button was pressed."""
+ return self.save_requested
diff --git a/src/lerobot/teleoperators/gamepad/teleop_gamepad.py b/src/lerobot/teleoperators/gamepad/teleop_gamepad.py
new file mode 100644
index 0000000000..98a0647e21
--- /dev/null
+++ b/src/lerobot/teleoperators/gamepad/teleop_gamepad.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+from enum import IntEnum
+from typing import Any
+
+import numpy as np
+
+from ..teleoperator import Teleoperator
+from .configuration_gamepad import GamepadTeleopConfig
+
+
+class GripperAction(IntEnum):
+ CLOSE = 0
+ STAY = 1
+ OPEN = 2
+
+
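+# Maps the "open"/"close"/"stay" command string returned by the gamepad controller's
+# gripper_command() to the discrete GripperAction value used in the action dict.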
+gripper_action_map = {
+ "close": GripperAction.CLOSE.value,
+ "open": GripperAction.OPEN.value,
+ "stay": GripperAction.STAY.value,
+}
+
+
+class GamepadTeleop(Teleoperator):
+ """
+ Teleop class to use gamepad inputs for control.
+ """
+
+ config_class = GamepadTeleopConfig
+ name = "gamepad"
+
+ def __init__(self, config: GamepadTeleopConfig):
+ super().__init__(config)
+ self.config = config
+ self.robot_type = config.type
+
+ self.gamepad = None
+
+ @property
+ def action_features(self) -> dict:
+ if self.config.use_gripper:
+ return {
+ "dtype": "float32",
+ "shape": (4,),
+ "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3},
+ }
+ else:
+ return {
+ "dtype": "float32",
+ "shape": (3,),
+ "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2},
+ }
+
+ @property
+ def feedback_features(self) -> dict:
+ return {}
+
+ def connect(self) -> None:
+ # use HidApi for macos
+ if sys.platform == "darwin":
+            # NOTE: On macOS, pygame doesn't reliably detect input from some controllers, so we fall back to hidapi
+ from .gamepad_utils import GamepadControllerHID as Gamepad
+ else:
+ from .gamepad_utils import GamepadController as Gamepad
+
+ self.gamepad = Gamepad()
+ self.gamepad.start()
+
+ def get_action(self) -> dict[str, Any]:
+ # Update the controller to get fresh inputs
+ self.gamepad.update()
+
+ # Get movement deltas from the controller
+ delta_x, delta_y, delta_z = self.gamepad.get_deltas()
+
+ # Create action from gamepad input
+ gamepad_action = np.array([delta_x, delta_y, delta_z], dtype=np.float32)
+
+ action_dict = {
+ "delta_x": gamepad_action[0],
+ "delta_y": gamepad_action[1],
+ "delta_z": gamepad_action[2],
+ }
+
+ # Default gripper action is to stay
+ gripper_action = GripperAction.STAY.value
+ if self.config.use_gripper:
+ gripper_command = self.gamepad.gripper_command()
+ gripper_action = gripper_action_map[gripper_command]
+ action_dict["gripper"] = gripper_action
+
+ return action_dict
+
+ def disconnect(self) -> None:
+ """Disconnect from the gamepad."""
+ if self.gamepad is not None:
+ self.gamepad.stop()
+ self.gamepad = None
+
+    @property
+    def is_connected(self) -> bool:
+ """Check if gamepad is connected."""
+ return self.gamepad is not None
+
+ def calibrate(self) -> None:
+ """Calibrate the gamepad."""
+ # No calibration needed for gamepad
+ pass
+
+    @property
+    def is_calibrated(self) -> bool:
+ """Check if gamepad is calibrated."""
+ # Gamepad doesn't require calibration
+ return True
+
+ def configure(self) -> None:
+ """Configure the gamepad."""
+ # No additional configuration needed
+ pass
+
+ def send_feedback(self, feedback: dict) -> None:
+ """Send feedback to the gamepad."""
+ # Gamepad doesn't support feedback
+ pass
diff --git a/src/lerobot/teleoperators/homunculus/__init__.py b/src/lerobot/teleoperators/homunculus/__init__.py
new file mode 100644
index 0000000000..b3c6c0bf5c
--- /dev/null
+++ b/src/lerobot/teleoperators/homunculus/__init__.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_homunculus import HomunculusArmConfig, HomunculusGloveConfig
+from .homunculus_arm import HomunculusArm
+from .homunculus_glove import HomunculusGlove
+from .joints_translation import homunculus_glove_to_hope_jr_hand
diff --git a/src/lerobot/teleoperators/homunculus/config_homunculus.py b/src/lerobot/teleoperators/homunculus/config_homunculus.py
new file mode 100644
index 0000000000..da465215ab
--- /dev/null
+++ b/src/lerobot/teleoperators/homunculus/config_homunculus.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("homunculus_glove")
+@dataclass
+class HomunculusGloveConfig(TeleoperatorConfig):
+ port: str # Port to connect to the glove
+ side: str # "left" / "right"
+ baud_rate: int = 115_200
+
+ def __post_init__(self):
+ if self.side not in ["right", "left"]:
+ raise ValueError(self.side)
+
+
+@TeleoperatorConfig.register_subclass("homunculus_arm")
+@dataclass
+class HomunculusArmConfig(TeleoperatorConfig):
+ port: str # Port to connect to the arm
+ baud_rate: int = 115_200
diff --git a/src/lerobot/teleoperators/homunculus/homunculus_arm.py b/src/lerobot/teleoperators/homunculus/homunculus_arm.py
new file mode 100644
index 0000000000..6f5137af93
--- /dev/null
+++ b/src/lerobot/teleoperators/homunculus/homunculus_arm.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import threading
+from collections import deque
+from pprint import pformat
+from typing import Deque
+
+import serial
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors.motors_bus import MotorCalibration, MotorNormMode
+from lerobot.utils.utils import enter_pressed, move_cursor_up
+
+from ..teleoperator import Teleoperator
+from .config_homunculus import HomunculusArmConfig
+
+logger = logging.getLogger(__name__)
+
+
+class HomunculusArm(Teleoperator):
+ """
+ Homunculus Arm designed by Hugging Face.
+ """
+
+ config_class = HomunculusArmConfig
+ name = "homunculus_arm"
+
+ def __init__(self, config: HomunculusArmConfig):
+ super().__init__(config)
+ self.config = config
+ self.serial = serial.Serial(config.port, config.baud_rate, timeout=1)
+ self.serial_lock = threading.Lock()
+
+ self.joints = {
+ "shoulder_pitch": MotorNormMode.RANGE_M100_100,
+ "shoulder_yaw": MotorNormMode.RANGE_M100_100,
+ "shoulder_roll": MotorNormMode.RANGE_M100_100,
+ "elbow_flex": MotorNormMode.RANGE_M100_100,
+ "wrist_roll": MotorNormMode.RANGE_M100_100,
+ "wrist_yaw": MotorNormMode.RANGE_M100_100,
+ "wrist_pitch": MotorNormMode.RANGE_M100_100,
+ }
+ n = 50
+ # EMA parameters ---------------------------------------------------
+ self.n: int = n
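+        # alpha = 2 / (n + 1): smoothing factor equivalent to an n-sample exponential moving average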
+ self.alpha: float = 2 / (n + 1)
+ # one deque *per joint* so we can inspect raw history if needed
+ self._buffers: dict[str, Deque[int]] = {
+ joint: deque(maxlen=n)
+ for joint in (
+ "shoulder_pitch",
+ "shoulder_yaw",
+ "shoulder_roll",
+ "elbow_flex",
+ "wrist_roll",
+ "wrist_yaw",
+ "wrist_pitch",
+ )
+ }
+ # running EMA value per joint – lazily initialised on first read
+ self._ema: dict[str, float | None] = dict.fromkeys(self._buffers)
+
+ self._state: dict[str, float] | None = None
+ self.new_state_event = threading.Event()
+ self.stop_event = threading.Event()
+ self.thread = threading.Thread(target=self._read_loop, daemon=True, name=f"{self} _read_loop")
+ self.state_lock = threading.Lock()
+
+ @property
+ def action_features(self) -> dict:
+ return {f"{joint}.pos": float for joint in self.joints}
+
+ @property
+ def feedback_features(self) -> dict:
+ return {}
+
+ @property
+ def is_connected(self) -> bool:
+ with self.serial_lock:
+ return self.serial.is_open and self.thread.is_alive()
+
+ def connect(self, calibrate: bool = True) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ if not self.serial.is_open:
+ self.serial.open()
+ self.thread.start()
+
+ # wait for the thread to ramp up & 1st state to be ready
+ if not self.new_state_event.wait(timeout=2):
+ raise TimeoutError(f"{self}: Timed out waiting for state after 2s.")
+
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.calibration_fpath.is_file()
+
+ def calibrate(self) -> None:
+ print(
+ "\nMove all joints through their entire range of motion."
+ "\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self._record_ranges_of_motion()
+
+ self.calibration = {}
+ for id_, joint in enumerate(self.joints):
+ self.calibration[joint] = MotorCalibration(
+ id=id_,
+ drive_mode=0,
+ homing_offset=0,
+ range_min=range_mins[joint],
+ range_max=range_maxes[joint],
+ )
+
+ self._save_calibration()
+ print("Calibration saved to", self.calibration_fpath)
+
+    # TODO(Steven): This function is duplicated from the `HomunculusGlove` class. Consider moving it to a shared utility to reduce code duplication.
+ def _record_ranges_of_motion(
+ self, joints: list[str] | None = None, display_values: bool = True
+ ) -> tuple[dict[str, int], dict[str, int]]:
+ """Interactively record the min/max encoder values of each joint.
+
+ Move the joints while the method streams live positions. Press :kbd:`Enter` to finish.
+
+ Args:
+ joints (list[str] | None, optional): Joints to record. Defaults to every joint (`None`).
+ display_values (bool, optional): When `True` (default) a live table is printed to the console.
+
+ Raises:
+ TypeError: `joints` is not `None` or a list.
+ ValueError: any joint's recorded min and max are the same.
+
+ Returns:
+ tuple[dict[str, int], dict[str, int]]: Two dictionaries *mins* and *maxes* with the extreme values
+ observed for each joint.
+ """
+ if joints is None:
+ joints = list(self.joints)
+ elif not isinstance(joints, list):
+ raise TypeError(joints)
+
+ display_len = max(len(key) for key in joints)
+
+ start_positions = self._read(joints, normalize=False)
+ mins = start_positions.copy()
+ maxes = start_positions.copy()
+
+ user_pressed_enter = False
+ while not user_pressed_enter:
+ positions = self._read(joints, normalize=False)
+ mins = {joint: int(min(positions[joint], min_)) for joint, min_ in mins.items()}
+ maxes = {joint: int(max(positions[joint], max_)) for joint, max_ in maxes.items()}
+
+ if display_values:
+ print("\n-------------------------------------------")
+ print(f"{'NAME':<{display_len}} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}")
+ for joint in joints:
+ print(
+ f"{joint:<{display_len}} | {mins[joint]:>6} | {positions[joint]:>6} | {maxes[joint]:>6}"
+ )
+
+ if enter_pressed():
+ user_pressed_enter = True
+
+ if display_values and not user_pressed_enter:
+ # Move cursor up to overwrite the previous output
+ move_cursor_up(len(joints) + 3)
+
+ same_min_max = [joint for joint in joints if mins[joint] == maxes[joint]]
+ if same_min_max:
+ raise ValueError(f"Some joints have the same min and max values:\n{pformat(same_min_max)}")
+
+ return mins, maxes
+
+ def configure(self) -> None:
+ pass
+
+    # TODO(Steven): This function is duplicated from the `HomunculusGlove` class. Consider moving it to a shared utility to reduce code duplication.
+ def _normalize(self, values: dict[str, int]) -> dict[str, float]:
+ if not self.calibration:
+ raise RuntimeError(f"{self} has no calibration registered.")
+
+ normalized_values = {}
+ for joint, val in values.items():
+ min_ = self.calibration[joint].range_min
+ max_ = self.calibration[joint].range_max
+ drive_mode = self.calibration[joint].drive_mode
+ bounded_val = min(max_, max(min_, val))
+
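+            # Map the clamped raw value into the joint's normalized range:
+            # RANGE_M100_100 -> [-100, 100], RANGE_0_100 -> [0, 100]; drive_mode == 1 flips the direction.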
+ if self.joints[joint] is MotorNormMode.RANGE_M100_100:
+ norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100
+ normalized_values[joint] = -norm if drive_mode else norm
+ elif self.joints[joint] is MotorNormMode.RANGE_0_100:
+ norm = ((bounded_val - min_) / (max_ - min_)) * 100
+ normalized_values[joint] = 100 - norm if drive_mode else norm
+
+ return normalized_values
+
+ def _apply_ema(self, raw: dict[str, int]) -> dict[str, float]:
+ """Update buffers & running EMA values; return smoothed dict."""
+ smoothed: dict[str, float] = {}
+ for joint, value in raw.items():
+ # maintain raw history
+ self._buffers[joint].append(value)
+
+ # initialise on first run
+ if self._ema[joint] is None:
+ self._ema[joint] = float(value)
+ else:
+ self._ema[joint] = self.alpha * value + (1 - self.alpha) * self._ema[joint]
+
+ smoothed[joint] = self._ema[joint]
+ return smoothed
+
+ def _read(
+ self, joints: list[str] | None = None, normalize: bool = True, timeout: float = 1
+ ) -> dict[str, int | float]:
+ """
+        Return the most recent state values read by the background thread (self._state),
+ optionally applying calibration.
+ """
+ if not self.new_state_event.wait(timeout=timeout):
+ raise TimeoutError(f"{self}: Timed out waiting for state after {timeout}s.")
+
+ with self.state_lock:
+ state = self._state
+
+ self.new_state_event.clear()
+
+ if state is None:
+ raise RuntimeError(f"{self} Internal error: Event set but no state available.")
+
+ if joints is not None:
+ state = {k: v for k, v in state.items() if k in joints}
+
+ if normalize:
+ state = self._normalize(state)
+
+ state = self._apply_ema(state)
+
+ return state
+
+ def _read_loop(self):
+ """
+        Continuously read from the serial buffer in a background thread and publish the latest values
+        to the main thread via self._state and new_state_event.
+ """
+ while not self.stop_event.is_set():
+ try:
+ raw_values = None
+ with self.serial_lock:
+ if self.serial.in_waiting > 0:
+ self.serial.flush()
+ raw_values = self.serial.readline().decode("utf-8").strip().split(" ")
+ if raw_values is None or len(raw_values) != 21: # 16 raw + 5 angle values
+ continue
+
+ joint_angles = {
+ "shoulder_pitch": int(raw_values[19]),
+ "shoulder_yaw": int(raw_values[18]),
+ "shoulder_roll": int(raw_values[20]),
+ "elbow_flex": int(raw_values[17]),
+ "wrist_roll": int(raw_values[16]),
+ "wrist_yaw": int(raw_values[1]),
+ "wrist_pitch": int(raw_values[0]),
+ }
+
+ with self.state_lock:
+ self._state = joint_angles
+ self.new_state_event.set()
+
+ except Exception as e:
+ logger.debug(f"Error reading frame in background thread for {self}: {e}")
+
+ def get_action(self) -> dict[str, float]:
+ joint_positions = self._read()
+ return {f"{joint}.pos": pos for joint, pos in joint_positions.items()}
+
+ def send_feedback(self, feedback: dict[str, float]) -> None:
+ raise NotImplementedError
+
+ def disconnect(self) -> None:
+ if not self.is_connected:
+            raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.stop_event.set()
+ self.thread.join(timeout=1)
+ self.serial.close()
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/teleoperators/homunculus/homunculus_glove.py b/src/lerobot/teleoperators/homunculus/homunculus_glove.py
new file mode 100644
index 0000000000..7b0ced9f60
--- /dev/null
+++ b/src/lerobot/teleoperators/homunculus/homunculus_glove.py
@@ -0,0 +1,338 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import threading
+from collections import deque
+from pprint import pformat
+from typing import Deque
+
+import serial
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import MotorCalibration
+from lerobot.motors.motors_bus import MotorNormMode
+from lerobot.teleoperators.homunculus.joints_translation import homunculus_glove_to_hope_jr_hand
+from lerobot.utils.utils import enter_pressed, move_cursor_up
+
+from ..teleoperator import Teleoperator
+from .config_homunculus import HomunculusGloveConfig
+
+logger = logging.getLogger(__name__)
+
+LEFT_HAND_INVERSIONS = [
+ "thumb_cmc",
+ "index_dip",
+ "middle_mcp_abduction",
+ "middle_dip",
+ "pinky_mcp_abduction",
+ "pinky_dip",
+]
+
+RIGHT_HAND_INVERSIONS = [
+ "thumb_mcp",
+ "thumb_cmc",
+ "thumb_pip",
+ "thumb_dip",
+ "index_mcp_abduction",
+ # "index_dip",
+ "middle_mcp_abduction",
+ # "middle_dip",
+ "ring_mcp_abduction",
+ "ring_mcp_flexion",
+ # "ring_dip",
+ "pinky_mcp_abduction",
+]
+
+
+class HomunculusGlove(Teleoperator):
+ """
+ Homunculus Glove designed by NepYope & Hugging Face.
+ """
+
+ config_class = HomunculusGloveConfig
+ name = "homunculus_glove"
+
+ def __init__(self, config: HomunculusGloveConfig):
+ super().__init__(config)
+ self.config = config
+ self.serial = serial.Serial(config.port, config.baud_rate, timeout=1)
+ self.serial_lock = threading.Lock()
+
+ self.joints = {
+ "thumb_cmc": MotorNormMode.RANGE_0_100,
+ "thumb_mcp": MotorNormMode.RANGE_0_100,
+ "thumb_pip": MotorNormMode.RANGE_0_100,
+ "thumb_dip": MotorNormMode.RANGE_0_100,
+ "index_mcp_abduction": MotorNormMode.RANGE_M100_100,
+ "index_mcp_flexion": MotorNormMode.RANGE_0_100,
+ "index_dip": MotorNormMode.RANGE_0_100,
+ "middle_mcp_abduction": MotorNormMode.RANGE_M100_100,
+ "middle_mcp_flexion": MotorNormMode.RANGE_0_100,
+ "middle_dip": MotorNormMode.RANGE_0_100,
+ "ring_mcp_abduction": MotorNormMode.RANGE_M100_100,
+ "ring_mcp_flexion": MotorNormMode.RANGE_0_100,
+ "ring_dip": MotorNormMode.RANGE_0_100,
+ "pinky_mcp_abduction": MotorNormMode.RANGE_M100_100,
+ "pinky_mcp_flexion": MotorNormMode.RANGE_0_100,
+ "pinky_dip": MotorNormMode.RANGE_0_100,
+ }
+ self.inverted_joints = RIGHT_HAND_INVERSIONS if config.side == "right" else LEFT_HAND_INVERSIONS
+
+ n = 10
+ # EMA parameters ---------------------------------------------------
+ self.n: int = n
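+        # alpha = 2 / (n + 1): smoothing factor equivalent to an n-sample exponential moving average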
+ self.alpha: float = 2 / (n + 1)
+ # one deque *per joint* so we can inspect raw history if needed
+ self._buffers: dict[str, Deque[int]] = {joint: deque(maxlen=n) for joint in self.joints}
+ # running EMA value per joint – lazily initialised on first read
+ self._ema: dict[str, float | None] = dict.fromkeys(self._buffers)
+
+ self._state: dict[str, float] | None = None
+ self.new_state_event = threading.Event()
+ self.stop_event = threading.Event()
+ self.thread = threading.Thread(target=self._read_loop, daemon=True, name=f"{self} _read_loop")
+ self.state_lock = threading.Lock()
+
+ @property
+ def action_features(self) -> dict:
+ return {f"{joint}.pos": float for joint in self.joints}
+
+ @property
+ def feedback_features(self) -> dict:
+ return {}
+
+ @property
+ def is_connected(self) -> bool:
+ with self.serial_lock:
+ return self.serial.is_open and self.thread.is_alive()
+
+ def connect(self, calibrate: bool = True) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ if not self.serial.is_open:
+ self.serial.open()
+ self.thread.start()
+
+ # wait for the thread to ramp up & 1st state to be ready
+ if not self.new_state_event.wait(timeout=2):
+ raise TimeoutError(f"{self}: Timed out waiting for state after 2s.")
+
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.calibration_fpath.is_file()
+
+ def calibrate(self) -> None:
+ range_mins, range_maxes = {}, {}
+ for finger in ["thumb", "index", "middle", "ring", "pinky"]:
+ print(
+ f"\nMove {finger} through its entire range of motion."
+ "\nRecording positions. Press ENTER to stop..."
+ )
+ finger_joints = [joint for joint in self.joints if joint.startswith(finger)]
+ finger_mins, finger_maxes = self._record_ranges_of_motion(finger_joints)
+ range_mins.update(finger_mins)
+ range_maxes.update(finger_maxes)
+
+ self.calibration = {}
+ for id_, joint in enumerate(self.joints):
+ self.calibration[joint] = MotorCalibration(
+ id=id_,
+ drive_mode=1 if joint in self.inverted_joints else 0,
+ homing_offset=0,
+ range_min=range_mins[joint],
+ range_max=range_maxes[joint],
+ )
+
+ self._save_calibration()
+ print("Calibration saved to", self.calibration_fpath)
+
+    # TODO(Steven): This function is duplicated from the `HomunculusArm` class. Consider moving it to a shared utility to reduce code duplication.
+ def _record_ranges_of_motion(
+ self, joints: list[str] | None = None, display_values: bool = True
+ ) -> tuple[dict[str, int], dict[str, int]]:
+ """Interactively record the min/max encoder values of each joint.
+
+ Move the joints while the method streams live positions. Press :kbd:`Enter` to finish.
+
+ Args:
+ joints (list[str] | None, optional): Joints to record. Defaults to every joint (`None`).
+ display_values (bool, optional): When `True` (default) a live table is printed to the console.
+
+ Raises:
+ TypeError: `joints` is not `None` or a list.
+ ValueError: any joint's recorded min and max are the same.
+
+ Returns:
+ tuple[dict[str, int], dict[str, int]]: Two dictionaries *mins* and *maxes* with the extreme values
+ observed for each joint.
+ """
+ if joints is None:
+ joints = list(self.joints)
+ elif not isinstance(joints, list):
+ raise TypeError(joints)
+
+ display_len = max(len(key) for key in joints)
+
+ start_positions = self._read(joints, normalize=False)
+ mins = start_positions.copy()
+ maxes = start_positions.copy()
+
+ user_pressed_enter = False
+ while not user_pressed_enter:
+ positions = self._read(joints, normalize=False)
+ mins = {joint: int(min(positions[joint], min_)) for joint, min_ in mins.items()}
+ maxes = {joint: int(max(positions[joint], max_)) for joint, max_ in maxes.items()}
+
+ if display_values:
+ print("\n-------------------------------------------")
+ print(f"{'NAME':<{display_len}} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}")
+ for joint in joints:
+ print(
+ f"{joint:<{display_len}} | {mins[joint]:>6} | {positions[joint]:>6} | {maxes[joint]:>6}"
+ )
+
+ if enter_pressed():
+ user_pressed_enter = True
+
+ if display_values and not user_pressed_enter:
+ # Move cursor up to overwrite the previous output
+ move_cursor_up(len(joints) + 3)
+
+ same_min_max = [joint for joint in joints if mins[joint] == maxes[joint]]
+ if same_min_max:
+ raise ValueError(f"Some joints have the same min and max values:\n{pformat(same_min_max)}")
+
+ return mins, maxes
+
+ def configure(self) -> None:
+ pass
+
+    # TODO(Steven): This function is duplicated from the `HomunculusArm` class. Consider moving it to a shared utility to reduce code duplication.
+ def _normalize(self, values: dict[str, int]) -> dict[str, float]:
+ if not self.calibration:
+ raise RuntimeError(f"{self} has no calibration registered.")
+
+ normalized_values = {}
+ for joint, val in values.items():
+ min_ = self.calibration[joint].range_min
+ max_ = self.calibration[joint].range_max
+ drive_mode = self.calibration[joint].drive_mode
+ bounded_val = min(max_, max(min_, val))
+
+ if self.joints[joint] is MotorNormMode.RANGE_M100_100:
+ norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100
+ normalized_values[joint] = -norm if drive_mode else norm
+ elif self.joints[joint] is MotorNormMode.RANGE_0_100:
+ norm = ((bounded_val - min_) / (max_ - min_)) * 100
+ normalized_values[joint] = 100 - norm if drive_mode else norm
+
+ return normalized_values
+
+ def _apply_ema(self, raw: dict[str, int]) -> dict[str, int]:
+ """Update buffers & running EMA values; return smoothed dict as integers."""
+ smoothed: dict[str, int] = {}
+ for joint, value in raw.items():
+ # maintain raw history
+ self._buffers[joint].append(value)
+
+ # initialise on first run
+ if self._ema[joint] is None:
+ self._ema[joint] = float(value)
+ else:
+ self._ema[joint] = self.alpha * value + (1 - self.alpha) * self._ema[joint]
+
+ # Convert back to int for compatibility with normalization
+ smoothed[joint] = int(round(self._ema[joint]))
+ return smoothed
+
+ def _read(
+ self, joints: list[str] | None = None, normalize: bool = True, timeout: float = 1
+ ) -> dict[str, int | float]:
+ """
+        Return the most recent state values read by the background thread (self._state),
+ optionally applying calibration.
+ """
+ if not self.new_state_event.wait(timeout=timeout):
+ raise TimeoutError(f"{self}: Timed out waiting for state after {timeout}s.")
+
+ with self.state_lock:
+ state = self._state
+
+ self.new_state_event.clear()
+
+ if state is None:
+ raise RuntimeError(f"{self} Internal error: Event set but no state available.")
+
+ if joints is not None:
+ state = {k: v for k, v in state.items() if k in joints}
+
+ # Apply EMA smoothing to raw values first
+ state = self._apply_ema(state)
+
+ # Then normalize if requested
+ if normalize:
+ state = self._normalize(state)
+
+ return state
+
+ def _read_loop(self):
+ """
+        Continuously read from the serial buffer in a background thread and publish the latest values
+        to the main thread via self._state and new_state_event.
+ """
+ while not self.stop_event.is_set():
+ try:
+ positions = None
+ with self.serial_lock:
+ if self.serial.in_waiting > 0:
+ self.serial.flush()
+ positions = self.serial.readline().decode("utf-8").strip().split(" ")
+ if positions is None or len(positions) != len(self.joints):
+ continue
+
+ joint_positions = {joint: int(pos) for joint, pos in zip(self.joints, positions, strict=True)}
+
+ with self.state_lock:
+ self._state = joint_positions
+ self.new_state_event.set()
+
+ except Exception as e:
+ logger.debug(f"Error reading frame in background thread for {self}: {e}")
+
+ def get_action(self) -> dict[str, float]:
+ joint_positions = self._read()
+ return homunculus_glove_to_hope_jr_hand(
+ {f"{joint}.pos": pos for joint, pos in joint_positions.items()}
+ )
+
+ def send_feedback(self, feedback: dict[str, float]) -> None:
+ raise NotImplementedError
+
+ def disconnect(self) -> None:
+ if not self.is_connected:
+            raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.stop_event.set()
+ self.thread.join(timeout=1)
+ self.serial.close()
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/teleoperators/homunculus/joints_translation.py b/src/lerobot/teleoperators/homunculus/joints_translation.py
new file mode 100644
index 0000000000..f14f7b3ef5
--- /dev/null
+++ b/src/lerobot/teleoperators/homunculus/joints_translation.py
@@ -0,0 +1,63 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
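+# Each *_SPLAY constant weights how much MCP abduction is blended into the radial/ulnar
+# flexor pair: flexor = flexion * (1 - splay) +/- abduction * splay.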
+INDEX_SPLAY = 0.3
+MIDDLE_SPLAY = 0.3
+RING_SPLAY = 0.3
+PINKY_SPLAY = 0.5
+
+
+def get_ulnar_flexion(flexion: float, abduction: float, splay: float):
+ return -abduction * splay + flexion * (1 - splay)
+
+
+def get_radial_flexion(flexion: float, abduction: float, splay: float):
+ return abduction * splay + flexion * (1 - splay)
+
+
+def homunculus_glove_to_hope_jr_hand(glove_action: dict[str, float]) -> dict[str, float]:
+ return {
+ "thumb_cmc.pos": glove_action["thumb_cmc.pos"],
+ "thumb_mcp.pos": glove_action["thumb_mcp.pos"],
+ "thumb_pip.pos": glove_action["thumb_pip.pos"],
+ "thumb_dip.pos": glove_action["thumb_dip.pos"],
+ "index_radial_flexor.pos": get_radial_flexion(
+ glove_action["index_mcp_flexion.pos"], glove_action["index_mcp_abduction.pos"], INDEX_SPLAY
+ ),
+ "index_ulnar_flexor.pos": get_ulnar_flexion(
+ glove_action["index_mcp_flexion.pos"], glove_action["index_mcp_abduction.pos"], INDEX_SPLAY
+ ),
+ "index_pip_dip.pos": glove_action["index_dip.pos"],
+ "middle_radial_flexor.pos": get_radial_flexion(
+ glove_action["middle_mcp_flexion.pos"], glove_action["middle_mcp_abduction.pos"], MIDDLE_SPLAY
+ ),
+ "middle_ulnar_flexor.pos": get_ulnar_flexion(
+ glove_action["middle_mcp_flexion.pos"], glove_action["middle_mcp_abduction.pos"], MIDDLE_SPLAY
+ ),
+ "middle_pip_dip.pos": glove_action["middle_dip.pos"],
+ "ring_radial_flexor.pos": get_radial_flexion(
+ glove_action["ring_mcp_flexion.pos"], glove_action["ring_mcp_abduction.pos"], RING_SPLAY
+ ),
+ "ring_ulnar_flexor.pos": get_ulnar_flexion(
+ glove_action["ring_mcp_flexion.pos"], glove_action["ring_mcp_abduction.pos"], RING_SPLAY
+ ),
+ "ring_pip_dip.pos": glove_action["ring_dip.pos"],
+ "pinky_radial_flexor.pos": get_radial_flexion(
+ glove_action["pinky_mcp_flexion.pos"], glove_action["pinky_mcp_abduction.pos"], PINKY_SPLAY
+ ),
+ "pinky_ulnar_flexor.pos": get_ulnar_flexion(
+ glove_action["pinky_mcp_flexion.pos"], glove_action["pinky_mcp_abduction.pos"], PINKY_SPLAY
+ ),
+ "pinky_pip_dip.pos": glove_action["pinky_dip.pos"],
+ }
diff --git a/src/lerobot/teleoperators/keyboard/__init__.py b/src/lerobot/teleoperators/keyboard/__init__.py
new file mode 100644
index 0000000000..72d01003a1
--- /dev/null
+++ b/src/lerobot/teleoperators/keyboard/__init__.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .configuration_keyboard import KeyboardEndEffectorTeleopConfig, KeyboardTeleopConfig
+from .teleop_keyboard import KeyboardEndEffectorTeleop, KeyboardTeleop
+
+__all__ = [
+ "KeyboardTeleopConfig",
+ "KeyboardTeleop",
+ "KeyboardEndEffectorTeleopConfig",
+ "KeyboardEndEffectorTeleop",
+]
diff --git a/src/lerobot/teleoperators/keyboard/configuration_keyboard.py b/src/lerobot/teleoperators/keyboard/configuration_keyboard.py
new file mode 100644
index 0000000000..5d5ef364f7
--- /dev/null
+++ b/src/lerobot/teleoperators/keyboard/configuration_keyboard.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("keyboard")
+@dataclass
+class KeyboardTeleopConfig(TeleoperatorConfig):
+ # TODO(Steven): Consider setting in here the keys that we want to capture/listen
+ mock: bool = False
+
+
+@TeleoperatorConfig.register_subclass("keyboard_ee")
+@dataclass
+class KeyboardEndEffectorTeleopConfig(KeyboardTeleopConfig):
+ use_gripper: bool = True
diff --git a/src/lerobot/teleoperators/keyboard/teleop_keyboard.py b/src/lerobot/teleoperators/keyboard/teleop_keyboard.py
new file mode 100644
index 0000000000..d034982f12
--- /dev/null
+++ b/src/lerobot/teleoperators/keyboard/teleop_keyboard.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import sys
+import time
+from queue import Queue
+from typing import Any
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+
+from ..teleoperator import Teleoperator
+from .configuration_keyboard import KeyboardEndEffectorTeleopConfig, KeyboardTeleopConfig
+
+PYNPUT_AVAILABLE = True
+try:
+ if ("DISPLAY" not in os.environ) and ("linux" in sys.platform):
+ logging.info("No DISPLAY set. Skipping pynput import.")
+ raise ImportError("pynput blocked intentionally due to no display.")
+
+ from pynput import keyboard
+except ImportError:
+ keyboard = None
+ PYNPUT_AVAILABLE = False
+except Exception as e:
+ keyboard = None
+ PYNPUT_AVAILABLE = False
+ logging.info(f"Could not import pynput: {e}")
+
+
+class KeyboardTeleop(Teleoperator):
+ """
+ Teleop class to use keyboard inputs for control.
+ """
+
+ config_class = KeyboardTeleopConfig
+ name = "keyboard"
+
+ def __init__(self, config: KeyboardTeleopConfig):
+ super().__init__(config)
+ self.config = config
+ self.robot_type = config.type
+
+ self.event_queue = Queue()
+ self.current_pressed = {}
+ self.listener = None
+ self.logs = {}
+
+ @property
+ def action_features(self) -> dict:
+ return {
+ "dtype": "float32",
+ "shape": (len(self.arm),),
+ "names": {"motors": list(self.arm.motors)},
+ }
+
+ @property
+ def feedback_features(self) -> dict:
+ return {}
+
+ @property
+ def is_connected(self) -> bool:
+ return PYNPUT_AVAILABLE and isinstance(self.listener, keyboard.Listener) and self.listener.is_alive()
+
+ @property
+ def is_calibrated(self) -> bool:
+ pass
+
+ def connect(self) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(
+ "Keyboard is already connected. Do not run `robot.connect()` twice."
+ )
+
+ if PYNPUT_AVAILABLE:
+ logging.info("pynput is available - enabling local keyboard listener.")
+ self.listener = keyboard.Listener(
+ on_press=self._on_press,
+ on_release=self._on_release,
+ )
+ self.listener.start()
+ else:
+ logging.info("pynput not available - skipping local keyboard listener.")
+ self.listener = None
+
+ def calibrate(self) -> None:
+ pass
+
+ def _on_press(self, key):
+ if hasattr(key, "char"):
+ self.event_queue.put((key.char, True))
+
+ def _on_release(self, key):
+ if hasattr(key, "char"):
+ self.event_queue.put((key.char, False))
+ if key == keyboard.Key.esc:
+ logging.info("ESC pressed, disconnecting.")
+ self.disconnect()
+
+ def _drain_pressed_keys(self):
+ while not self.event_queue.empty():
+ key_char, is_pressed = self.event_queue.get_nowait()
+ self.current_pressed[key_char] = is_pressed
+
+ def configure(self):
+ pass
+
+ def get_action(self) -> dict[str, Any]:
+ before_read_t = time.perf_counter()
+
+ if not self.is_connected:
+ raise DeviceNotConnectedError(
+ "KeyboardTeleop is not connected. You need to run `connect()` before `get_action()`."
+ )
+
+ self._drain_pressed_keys()
+
+ # Generate action based on current key states
+ action = {key for key, val in self.current_pressed.items() if val}
+ self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
+
+ return dict.fromkeys(action, None)
+
+ def send_feedback(self, feedback: dict[str, Any]) -> None:
+ pass
+
+ def disconnect(self) -> None:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(
+ "KeyboardTeleop is not connected. You need to run `robot.connect()` before `disconnect()`."
+ )
+ if self.listener is not None:
+ self.listener.stop()
+
+
+class KeyboardEndEffectorTeleop(KeyboardTeleop):
+ """
+ Teleop class to use keyboard inputs for end effector control.
+ Designed to be used with the `So100FollowerEndEffector` robot.
+ """
+
+ config_class = KeyboardEndEffectorTeleopConfig
+ name = "keyboard_ee"
+
+ def __init__(self, config: KeyboardEndEffectorTeleopConfig):
+ super().__init__(config)
+ self.config = config
+ self.misc_keys_queue = Queue()
+
+ @property
+ def action_features(self) -> dict:
+ if self.config.use_gripper:
+ return {
+ "dtype": "float32",
+ "shape": (4,),
+ "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3},
+ }
+ else:
+ return {
+ "dtype": "float32",
+ "shape": (3,),
+ "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2},
+ }
+
+ def _on_press(self, key):
+ if hasattr(key, "char"):
+ key = key.char
+ self.event_queue.put((key, True))
+
+ def _on_release(self, key):
+ if hasattr(key, "char"):
+ key = key.char
+ self.event_queue.put((key, False))
+
+ def get_action(self) -> dict[str, Any]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(
+ "KeyboardTeleop is not connected. You need to run `connect()` before `get_action()`."
+ )
+
+ self._drain_pressed_keys()
+ delta_x = 0.0
+ delta_y = 0.0
+ delta_z = 0.0
+ gripper_action = 1.0
+
+ # Generate action based on current key states
+ for key, val in self.current_pressed.items():
+ if key == keyboard.Key.up:
+ delta_y = -int(val)
+ elif key == keyboard.Key.down:
+ delta_y = int(val)
+ elif key == keyboard.Key.left:
+ delta_x = int(val)
+ elif key == keyboard.Key.right:
+ delta_x = -int(val)
+ elif key == keyboard.Key.shift:
+ delta_z = -int(val)
+ elif key == keyboard.Key.shift_r:
+ delta_z = int(val)
+ elif key == keyboard.Key.ctrl_r:
+ # Gripper actions are expected to be between 0 (close), 1 (stay), 2 (open)
+ gripper_action = int(val) + 1
+ elif key == keyboard.Key.ctrl_l:
+ gripper_action = int(val) - 1
+ elif val:
+ # If the key is pressed, add it to the misc_keys_queue
+ # this will record key presses that are not part of the delta_x, delta_y, delta_z
+ # this is useful for retrieving other events like interventions for RL, episode success, etc.
+ self.misc_keys_queue.put(key)
+
+ self.current_pressed.clear()
+
+ action_dict = {
+ "delta_x": delta_x,
+ "delta_y": delta_y,
+ "delta_z": delta_z,
+ }
+
+ if self.config.use_gripper:
+ action_dict["gripper"] = gripper_action
+
+ return action_dict
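+
+
+# Usage sketch (illustrative only, not part of the class): a control loop would poll
+# `get_action()` every cycle and can drain `misc_keys_queue` for extra key events
+# (e.g. interventions or episode-success markers for RL). Assuming the default config:
+#
+#   teleop = KeyboardEndEffectorTeleop(KeyboardEndEffectorTeleopConfig())
+#   teleop.connect()
+#   action = teleop.get_action()  # {"delta_x": ..., "delta_y": ..., "delta_z": ..., "gripper": ...}
+#   while not teleop.misc_keys_queue.empty():
+#       key = teleop.misc_keys_queue.get()  # handle miscellaneous key presses here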
diff --git a/src/lerobot/teleoperators/koch_leader/__init__.py b/src/lerobot/teleoperators/koch_leader/__init__.py
new file mode 100644
index 0000000000..1bf9d51db6
--- /dev/null
+++ b/src/lerobot/teleoperators/koch_leader/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_koch_leader import KochLeaderConfig
+from .koch_leader import KochLeader
diff --git a/src/lerobot/teleoperators/koch_leader/config_koch_leader.py b/src/lerobot/teleoperators/koch_leader/config_koch_leader.py
new file mode 100644
index 0000000000..64aaae1235
--- /dev/null
+++ b/src/lerobot/teleoperators/koch_leader/config_koch_leader.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("koch_leader")
+@dataclass
+class KochLeaderConfig(TeleoperatorConfig):
+ # Port to connect to the arm
+ port: str
+
+ # Puts the gripper motor in torque mode with its goal position set to this value. This makes it
+ # possible to squeeze the gripper and have it spring back to an open position on its own.
+ gripper_open_pos: float = 50.0
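+
+
+# Example (illustrative; the port value is machine-specific):
+#   config = KochLeaderConfig(port="/dev/ttyACM0", gripper_open_pos=50.0)
+# With the gripper torque enabled in current-based position mode (see KochLeader.configure),
+# squeezing the leader gripper and releasing it lets it spring back toward this open position.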
diff --git a/src/lerobot/teleoperators/koch_leader/koch_leader.py b/src/lerobot/teleoperators/koch_leader/koch_leader.py
new file mode 100644
index 0000000000..8eb076faee
--- /dev/null
+++ b/src/lerobot/teleoperators/koch_leader/koch_leader.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.dynamixel import (
+ DriveMode,
+ DynamixelMotorsBus,
+ OperatingMode,
+)
+
+from ..teleoperator import Teleoperator
+from .config_koch_leader import KochLeaderConfig
+
+logger = logging.getLogger(__name__)
+
+
+class KochLeader(Teleoperator):
+ """
+ - [Koch v1.0](https://github.com/AlexanderKoch-Koch/low_cost_robot), with and without the wrist-to-elbow
+ expansion, developed by Alexander Koch from [Tau Robotics](https://tau-robotics.com)
+ - [Koch v1.1](https://github.com/jess-moss/koch-v1-1) developed by Jess Moss
+ """
+
+ config_class = KochLeaderConfig
+ name = "koch_leader"
+
+ def __init__(self, config: KochLeaderConfig):
+ super().__init__(config)
+ self.config = config
+ self.bus = DynamixelMotorsBus(
+ port=self.config.port,
+ motors={
+ "shoulder_pan": Motor(1, "xl330-m077", MotorNormMode.RANGE_M100_100),
+ "shoulder_lift": Motor(2, "xl330-m077", MotorNormMode.RANGE_M100_100),
+ "elbow_flex": Motor(3, "xl330-m077", MotorNormMode.RANGE_M100_100),
+ "wrist_flex": Motor(4, "xl330-m077", MotorNormMode.RANGE_M100_100),
+ "wrist_roll": Motor(5, "xl330-m077", MotorNormMode.RANGE_M100_100),
+ "gripper": Motor(6, "xl330-m077", MotorNormMode.RANGE_0_100),
+ },
+ calibration=self.calibration,
+ )
+
+ @property
+ def action_features(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def feedback_features(self) -> dict[str, type]:
+ return {}
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected
+
+ def connect(self, calibrate: bool = True) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ logger.info(f"\nRunning calibration of {self}")
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+ self.bus.write("Drive_Mode", "elbow_flex", DriveMode.INVERTED.value)
+ drive_modes = {motor: 1 if motor == "elbow_flex" else 0 for motor in self.bus.motors}
+
+ input(f"Move {self} to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ full_turn_motors = ["shoulder_pan", "wrist_roll"]
+ unknown_range_motors = [motor for motor in self.bus.motors if motor not in full_turn_motors]
+ print(
+ f"Move all joints except {full_turn_motors} sequentially through their "
+ "entire ranges of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+ for motor in full_turn_motors:
+ range_mins[motor] = 0
+ range_maxes[motor] = 4095
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=drive_modes[motor],
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ logger.info(f"Calibration saved to {self.calibration_fpath}")
+
+ def configure(self) -> None:
+ self.bus.disable_torque()
+ self.bus.configure_motors()
+ for motor in self.bus.motors:
+ if motor != "gripper":
+ # Use 'extended position mode' for all motors except the gripper, because in joint mode
+ # the servos can't rotate more than 360 degrees (from 0 to 4095). If a mistake is made
+ # while assembling the arm, a servo could otherwise end up at position 0 or 4095 right
+ # at a crucial point of its range.
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+ # Use 'current-based position control' for the gripper so that it is limited by its current limit.
+ # For the follower gripper, this means it can grasp an object without applying too much force, even
+ # though its goal position is a full grasp (both fingers are commanded to close until they touch).
+ # For the leader gripper, this means we can use it as a physical trigger: pushing it with a finger
+ # makes it move, and it returns to its original target position when the force is released.
+ self.bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)
+ # Set gripper's goal pos in current position mode so that we can use it as a trigger.
+ self.bus.enable_torque("gripper")
+ if self.is_calibrated:
+ self.bus.write("Goal_Position", "gripper", self.config.gripper_open_pos)
+
+ def setup_motors(self) -> None:
+ for motor in reversed(self.bus.motors):
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ def get_action(self) -> dict[str, float]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ start = time.perf_counter()
+ action = self.bus.sync_read("Present_Position")
+ action = {f"{motor}.pos": val for motor, val in action.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read action: {dt_ms:.1f}ms")
+ return action
+
+ def send_feedback(self, feedback: dict[str, float]) -> None:
+ # TODO(rcadene, aliberts): Implement force feedback
+ raise NotImplementedError
+
+ def disconnect(self) -> None:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect()
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/teleoperators/so100_leader/__init__.py b/src/lerobot/teleoperators/so100_leader/__init__.py
new file mode 100644
index 0000000000..747416be2e
--- /dev/null
+++ b/src/lerobot/teleoperators/so100_leader/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_so100_leader import SO100LeaderConfig
+from .so100_leader import SO100Leader
diff --git a/src/lerobot/teleoperators/so100_leader/config_so100_leader.py b/src/lerobot/teleoperators/so100_leader/config_so100_leader.py
new file mode 100644
index 0000000000..a97949b7e7
--- /dev/null
+++ b/src/lerobot/teleoperators/so100_leader/config_so100_leader.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("so100_leader")
+@dataclass
+class SO100LeaderConfig(TeleoperatorConfig):
+ # Port to connect to the arm
+ port: str
diff --git a/src/lerobot/teleoperators/so100_leader/so100_leader.py b/src/lerobot/teleoperators/so100_leader/so100_leader.py
new file mode 100644
index 0000000000..18dad44d4d
--- /dev/null
+++ b/src/lerobot/teleoperators/so100_leader/so100_leader.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.feetech import (
+ FeetechMotorsBus,
+ OperatingMode,
+)
+
+from ..teleoperator import Teleoperator
+from .config_so100_leader import SO100LeaderConfig
+
+logger = logging.getLogger(__name__)
+
+
+class SO100Leader(Teleoperator):
+ """
+ [SO-100 Leader Arm](https://github.com/TheRobotStudio/SO-ARM100) designed by TheRobotStudio
+ """
+
+ config_class = SO100LeaderConfig
+ name = "so100_leader"
+
+ def __init__(self, config: SO100LeaderConfig):
+ super().__init__(config)
+ self.config = config
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ "shoulder_pan": Motor(1, "sts3215", MotorNormMode.RANGE_M100_100),
+ "shoulder_lift": Motor(2, "sts3215", MotorNormMode.RANGE_M100_100),
+ "elbow_flex": Motor(3, "sts3215", MotorNormMode.RANGE_M100_100),
+ "wrist_flex": Motor(4, "sts3215", MotorNormMode.RANGE_M100_100),
+ "wrist_roll": Motor(5, "sts3215", MotorNormMode.RANGE_M100_100),
+ "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
+ },
+ calibration=self.calibration,
+ )
+
+ @property
+ def action_features(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def feedback_features(self) -> dict[str, type]:
+ return {}
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected
+
+ def connect(self, calibrate: bool = True) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ logger.info(f"\nRunning calibration of {self}")
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+
+ input(f"Move {self} to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ full_turn_motor = "wrist_roll"
+ unknown_range_motors = [motor for motor in self.bus.motors if motor != full_turn_motor]
+ print(
+ f"Move all joints except '{full_turn_motor}' sequentially through their "
+ "entire ranges of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+ range_mins[full_turn_motor] = 0
+ range_maxes[full_turn_motor] = 4095
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=0,
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ print(f"Calibration saved to {self.calibration_fpath}")
+
+ def configure(self) -> None:
+ self.bus.disable_torque()
+ self.bus.configure_motors()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+
+ def setup_motors(self) -> None:
+ for motor in reversed(self.bus.motors):
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ def get_action(self) -> dict[str, float]:
+ start = time.perf_counter()
+ action = self.bus.sync_read("Present_Position")
+ action = {f"{motor}.pos": val for motor, val in action.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read action: {dt_ms:.1f}ms")
+ return action
+
+ def send_feedback(self, feedback: dict[str, float]) -> None:
+ # TODO(rcadene, aliberts): Implement force feedback
+ raise NotImplementedError
+
+ def disconnect(self) -> None:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect()
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/teleoperators/so101_leader/__init__.py b/src/lerobot/teleoperators/so101_leader/__init__.py
new file mode 100644
index 0000000000..11e277c915
--- /dev/null
+++ b/src/lerobot/teleoperators/so101_leader/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_so101_leader import SO101LeaderConfig
+from .so101_leader import SO101Leader
diff --git a/src/lerobot/teleoperators/so101_leader/config_so101_leader.py b/src/lerobot/teleoperators/so101_leader/config_so101_leader.py
new file mode 100644
index 0000000000..8d91c32dfe
--- /dev/null
+++ b/src/lerobot/teleoperators/so101_leader/config_so101_leader.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("so101_leader")
+@dataclass
+class SO101LeaderConfig(TeleoperatorConfig):
+ # Port to connect to the arm
+ port: str
+
+ use_degrees: bool = False
diff --git a/src/lerobot/teleoperators/so101_leader/so101_leader.py b/src/lerobot/teleoperators/so101_leader/so101_leader.py
new file mode 100644
index 0000000000..2ce28d2e46
--- /dev/null
+++ b/src/lerobot/teleoperators/so101_leader/so101_leader.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.feetech import (
+ FeetechMotorsBus,
+ OperatingMode,
+)
+
+from ..teleoperator import Teleoperator
+from .config_so101_leader import SO101LeaderConfig
+
+logger = logging.getLogger(__name__)
+
+
+class SO101Leader(Teleoperator):
+ """
+ SO-101 Leader Arm designed by TheRobotStudio and Hugging Face.
+ """
+
+ config_class = SO101LeaderConfig
+ name = "so101_leader"
+
+ def __init__(self, config: SO101LeaderConfig):
+ super().__init__(config)
+ self.config = config
+ norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
+ self.bus = FeetechMotorsBus(
+ port=self.config.port,
+ motors={
+ "shoulder_pan": Motor(1, "sts3215", norm_mode_body),
+ "shoulder_lift": Motor(2, "sts3215", norm_mode_body),
+ "elbow_flex": Motor(3, "sts3215", norm_mode_body),
+ "wrist_flex": Motor(4, "sts3215", norm_mode_body),
+ "wrist_roll": Motor(5, "sts3215", norm_mode_body),
+ "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
+ },
+ calibration=self.calibration,
+ )
+
+ @property
+ def action_features(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def feedback_features(self) -> dict[str, type]:
+ return {}
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected
+
+ def connect(self, calibrate: bool = True) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ logger.info(f"\nRunning calibration of {self}")
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+
+ input(f"Move {self} to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ print(
+ "Move all joints sequentially through their entire ranges "
+ "of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion()
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=0,
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ print(f"Calibration saved to {self.calibration_fpath}")
+
+ def configure(self) -> None:
+ self.bus.disable_torque()
+ self.bus.configure_motors()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+
+ def setup_motors(self) -> None:
+ for motor in reversed(self.bus.motors):
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+ self.bus.setup_motor(motor)
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+ def get_action(self) -> dict[str, float]:
+ start = time.perf_counter()
+ action = self.bus.sync_read("Present_Position")
+ action = {f"{motor}.pos": val for motor, val in action.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read action: {dt_ms:.1f}ms")
+ return action
+
+ def send_feedback(self, feedback: dict[str, float]) -> None:
+ # TODO(rcadene, aliberts): Implement force feedback
+ raise NotImplementedError
+
+ def disconnect(self) -> None:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect()
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/teleoperators/stretch3_gamepad/__init__.py b/src/lerobot/teleoperators/stretch3_gamepad/__init__.py
new file mode 100644
index 0000000000..fa5a19974b
--- /dev/null
+++ b/src/lerobot/teleoperators/stretch3_gamepad/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .configuration_stretch3 import Stretch3GamePadConfig
+from .stretch3_gamepad import Stretch3GamePad
diff --git a/src/lerobot/teleoperators/stretch3_gamepad/configuration_stretch3.py b/src/lerobot/teleoperators/stretch3_gamepad/configuration_stretch3.py
new file mode 100644
index 0000000000..507a215898
--- /dev/null
+++ b/src/lerobot/teleoperators/stretch3_gamepad/configuration_stretch3.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("stretch3")
+@dataclass
+class Stretch3GamePadConfig(TeleoperatorConfig):
+ mock: bool = False
diff --git a/src/lerobot/teleoperators/stretch3_gamepad/stretch3_gamepad.py b/src/lerobot/teleoperators/stretch3_gamepad/stretch3_gamepad.py
new file mode 100644
index 0000000000..bdcb57d407
--- /dev/null
+++ b/src/lerobot/teleoperators/stretch3_gamepad/stretch3_gamepad.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+import numpy as np
+from stretch_body.gamepad_teleop import GamePadTeleop
+from stretch_body.robot_params import RobotParams
+
+from lerobot.errors import DeviceAlreadyConnectedError
+
+from ..teleoperator import Teleoperator
+from .configuration_stretch3 import Stretch3GamePadConfig
+
+# from stretch_body.gamepad_controller.GamePadController
+GAMEPAD_BUTTONS = [
+ "middle_led_ring_button_pressed",
+ "left_stick_x",
+ "left_stick_y",
+ "right_stick_x",
+ "right_stick_y",
+ "left_stick_button_pressed",
+ "right_stick_button_pressed",
+ "bottom_button_pressed",
+ "top_button_pressed",
+ "left_button_pressed",
+ "right_button_pressed",
+ "left_shoulder_button_pressed",
+ "right_shoulder_button_pressed",
+ "select_button_pressed",
+ "start_button_pressed",
+ "left_trigger_pulled",
+ "right_trigger_pulled",
+ "bottom_pad_pressed",
+ "top_pad_pressed",
+ "left_pad_pressed",
+ "right_pad_pressed",
+]
+
+
+class Stretch3GamePad(Teleoperator):
+ """[Stretch 3](https://hello-robot.com/stretch-3-product), by Hello Robot."""
+
+ config_class = Stretch3GamePadConfig
+ name = "stretch3"
+
+ def __init__(self, config: Stretch3GamePadConfig):
+ raise NotImplementedError
+ super().__init__(config)
+
+ self.config = config
+ self.robot_type = self.config.type
+
+ self.api = GamePadTeleop(robot_instance=False)
+
+ self.is_connected = False
+ self.logs = {}
+
+ # TODO(aliberts): test this
+ RobotParams.set_logging_level("WARNING")
+ RobotParams.set_logging_formatter("brief_console_formatter")
+
+ @property
+ def action_features(self) -> dict:
+ return {
+ "dtype": "float32",
+ "shape": (len(GAMEPAD_BUTTONS),),
+ "names": {"buttons": GAMEPAD_BUTTONS},
+ }
+
+ @property
+ def feedback_features(self) -> dict:
+ return {}
+
+ def connect(self) -> None:
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(
+ "ManipulatorRobot is already connected. Do not run `robot.connect()` twice."
+ )
+
+ self.api.startup()
+ self.api._update_state() # Check controller can be read & written
+ self.api._update_modes()
+ self.is_connected = True
+
+ def calibrate(self) -> None:
+ pass
+
+ def get_action(self) -> np.ndarray:
+ # Read Stretch state
+ before_read_t = time.perf_counter()
+ action = self.api.gamepad_controller.get_state()
+ self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
+
+ action = np.asarray(list(action.values()))
+
+ return action
+
+ def send_feedback(self, feedback: np.ndarray) -> None:
+ pass
+
+ def print_logs(self) -> None:
+ pass
+ # TODO(aliberts): move robot-specific logs logic here
+
+ def disconnect(self) -> None:
+ self.api.stop()
+ self.is_connected = False
diff --git a/src/lerobot/teleoperators/teleoperator.py b/src/lerobot/teleoperators/teleoperator.py
new file mode 100644
index 0000000000..c360ee7bba
--- /dev/null
+++ b/src/lerobot/teleoperators/teleoperator.py
@@ -0,0 +1,181 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import builtins
+from pathlib import Path
+from typing import Any
+
+import draccus
+
+from lerobot.constants import HF_LEROBOT_CALIBRATION, TELEOPERATORS
+from lerobot.motors.motors_bus import MotorCalibration
+
+from .config import TeleoperatorConfig
+
+
+class Teleoperator(abc.ABC):
+ """
+ The base abstract class for all LeRobot-compatible teleoperation devices.
+
+ This class provides a standardized interface for interacting with physical teleoperators.
+ Subclasses must implement all abstract methods and properties to be usable.
+
+ Attributes:
+ config_class (TeleoperatorConfig): The expected configuration class for this teleoperator.
+ name (str): The unique name used to identify this teleoperator type.
+ """
+
+ # Set these in ALL subclasses
+ config_class: builtins.type[TeleoperatorConfig]
+ name: str
+
+ def __init__(self, config: TeleoperatorConfig):
+ self.id = config.id
+ self.calibration_dir = (
+ config.calibration_dir
+ if config.calibration_dir
+ else HF_LEROBOT_CALIBRATION / TELEOPERATORS / self.name
+ )
+ self.calibration_dir.mkdir(parents=True, exist_ok=True)
+ self.calibration_fpath = self.calibration_dir / f"{self.id}.json"
+ self.calibration: dict[str, MotorCalibration] = {}
+ if self.calibration_fpath.is_file():
+ self._load_calibration()
+
+ def __str__(self) -> str:
+ return f"{self.id} {self.__class__.__name__}"
+
+ @property
+ @abc.abstractmethod
+ def action_features(self) -> dict:
+ """
+ A dictionary describing the structure and types of the actions produced by the teleoperator. Its
+ structure (keys) should match the structure of what is returned by :pymeth:`get_action`. Values for
+ the dict should be the type of the value if it is a simple value, e.g. `float` for a single
+ proprioceptive value (a joint's goal position/velocity).
+
+ Note: this property should be callable regardless of whether the teleoperator is connected or not.
+ """
+ pass
+
+ @property
+ @abc.abstractmethod
+ def feedback_features(self) -> dict:
+ """
+ A dictionary describing the structure and types of the feedback actions expected by the teleoperator. Its
+ structure (keys) should match the structure of what is passed to :pymeth:`send_feedback`. Values for
+ the dict should be the type of the value if it is a simple value, e.g. `float` for a single
+ proprioceptive value (a joint's goal position/velocity).
+
+ Note: this property should be callable regardless of whether the teleoperator is connected or not.
+ """
+ pass
+
+ @property
+ @abc.abstractmethod
+ def is_connected(self) -> bool:
+ """
+ Whether the teleoperator is currently connected or not. If `False`, calling :pymeth:`get_action`
+ or :pymeth:`send_feedback` should raise an error.
+ """
+ pass
+
+ @abc.abstractmethod
+ def connect(self, calibrate: bool = True) -> None:
+ """
+ Establish communication with the teleoperator.
+
+ Args:
+ calibrate (bool): If True, automatically calibrate the teleoperator after connecting if it's not
+ calibrated or needs calibration (this is hardware-dependent).
+ """
+ pass
+
+ @property
+ @abc.abstractmethod
+ def is_calibrated(self) -> bool:
+ """Whether the teleoperator is currently calibrated or not. Should be always `True` if not applicable"""
+ pass
+
+ @abc.abstractmethod
+ def calibrate(self) -> None:
+ """
+ Calibrate the teleoperator if applicable. If not, this should be a no-op.
+
+ This method should collect any necessary data (e.g., motor offsets) and update the
+ :pyattr:`calibration` dictionary accordingly.
+ """
+ pass
+
+ def _load_calibration(self, fpath: Path | None = None) -> None:
+ """
+ Helper to load calibration data from the specified file.
+
+ Args:
+ fpath (Path | None): Optional path to the calibration file. Defaults to `self.calibration_fpath`.
+ """
+ fpath = self.calibration_fpath if fpath is None else fpath
+ with open(fpath) as f, draccus.config_type("json"):
+ self.calibration = draccus.load(dict[str, MotorCalibration], f)
+
+ def _save_calibration(self, fpath: Path | None = None) -> None:
+ """
+ Helper to save calibration data to the specified file.
+
+ Args:
+ fpath (Path | None): Optional path to save the calibration file. Defaults to `self.calibration_fpath`.
+ """
+ fpath = self.calibration_fpath if fpath is None else fpath
+ with open(fpath, "w") as f, draccus.config_type("json"):
+ draccus.dump(self.calibration, f, indent=4)
+
+ @abc.abstractmethod
+ def configure(self) -> None:
+ """
+ Apply any one-time or runtime configuration to the teleoperator.
+ This may include setting motor parameters, control modes, or initial state.
+ """
+ pass
+
+ @abc.abstractmethod
+ def get_action(self) -> dict[str, Any]:
+ """
+ Retrieve the current action from the teleoperator.
+
+ Returns:
+ dict[str, Any]: A flat dictionary representing the teleoperator's current actions. Its
+ structure should match :pymeth:`action_features`.
+ """
+ pass
+
+ @abc.abstractmethod
+ def send_feedback(self, feedback: dict[str, Any]) -> None:
+ """
+ Send a feedback action command to the teleoperator.
+
+ Args:
+ feedback (dict[str, Any]): Dictionary representing the desired feedback. Its structure should match
+ :pymeth:`feedback_features`.
+ """
+ pass
+
+ @abc.abstractmethod
+ def disconnect(self) -> None:
+ """Disconnect from the teleoperator and perform any necessary cleanup."""
+ pass
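+
+
+# Typical lifecycle of a concrete teleoperator (a sketch of the intended usage, not
+# enforced by this base class): connect() -> calibrate()/configure() as needed ->
+# repeated get_action() (and optionally send_feedback()) inside the control loop ->
+# disconnect() on shutdown. Calibration data is persisted as JSON at
+# `{calibration_dir}/{id}.json` and reloaded automatically in __init__ when present.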
diff --git a/src/lerobot/teleoperators/utils.py b/src/lerobot/teleoperators/utils.py
new file mode 100644
index 0000000000..344a95d72b
--- /dev/null
+++ b/src/lerobot/teleoperators/utils.py
@@ -0,0 +1,69 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config import TeleoperatorConfig
+from .teleoperator import Teleoperator
+
+
+def make_teleoperator_from_config(config: TeleoperatorConfig) -> Teleoperator:
+ if config.type == "keyboard":
+ from .keyboard import KeyboardTeleop
+
+ return KeyboardTeleop(config)
+ elif config.type == "koch_leader":
+ from .koch_leader import KochLeader
+
+ return KochLeader(config)
+ elif config.type == "so100_leader":
+ from .so100_leader import SO100Leader
+
+ return SO100Leader(config)
+ elif config.type == "so101_leader":
+ from .so101_leader import SO101Leader
+
+ return SO101Leader(config)
+ elif config.type == "stretch3":
+ from .stretch3_gamepad import Stretch3GamePad
+
+ return Stretch3GamePad(config)
+ elif config.type == "widowx":
+ from .widowx import WidowX
+
+ return WidowX(config)
+ elif config.type == "mock_teleop":
+ from tests.mocks.mock_teleop import MockTeleop
+
+ return MockTeleop(config)
+ elif config.type == "gamepad":
+ from .gamepad.teleop_gamepad import GamepadTeleop
+
+ return GamepadTeleop(config)
+ elif config.type == "keyboard_ee":
+ from .keyboard.teleop_keyboard import KeyboardEndEffectorTeleop
+
+ return KeyboardEndEffectorTeleop(config)
+ elif config.type == "homunculus_glove":
+ from .homunculus import HomunculusGlove
+
+ return HomunculusGlove(config)
+ elif config.type == "homunculus_arm":
+ from .homunculus import HomunculusArm
+
+ return HomunculusArm(config)
+ elif config.type == "bi_so100_leader":
+ from .bi_so100_leader import BiSO100Leader
+
+ return BiSO100Leader(config)
+ else:
+ raise ValueError(config.type)
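+
+
+# Usage sketch (illustrative; assumes the keyboard config can be built with defaults):
+#
+#   from lerobot.teleoperators.keyboard import KeyboardTeleopConfig
+#
+#   teleop = make_teleoperator_from_config(KeyboardTeleopConfig())
+#   teleop.connect()
+#   action = teleop.get_action()
+#
+# The `type` field appears to correspond to the name passed to
+# `TeleoperatorConfig.register_subclass(...)` on each config class, which is what the
+# dispatch above keys on.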
diff --git a/src/lerobot/teleoperators/widowx/__init__.py b/src/lerobot/teleoperators/widowx/__init__.py
new file mode 100644
index 0000000000..42e312f496
--- /dev/null
+++ b/src/lerobot/teleoperators/widowx/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_widowx import WidowXConfig
+from .widowx import WidowX
diff --git a/src/lerobot/teleoperators/widowx/config_widowx.py b/src/lerobot/teleoperators/widowx/config_widowx.py
new file mode 100644
index 0000000000..42fae12db2
--- /dev/null
+++ b/src/lerobot/teleoperators/widowx/config_widowx.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("widowx")
+@dataclass
+class WidowXConfig(TeleoperatorConfig):
+ port: str # Port to connect to the arm
diff --git a/src/lerobot/teleoperators/widowx/widowx.py b/src/lerobot/teleoperators/widowx/widowx.py
new file mode 100644
index 0000000000..6becd767fc
--- /dev/null
+++ b/src/lerobot/teleoperators/widowx/widowx.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.dynamixel import (
+ DriveMode,
+ DynamixelMotorsBus,
+ OperatingMode,
+)
+
+from ..teleoperator import Teleoperator
+from .config_widowx import WidowXConfig
+
+logger = logging.getLogger(__name__)
+
+
+class WidowX(Teleoperator):
+ """
+ [WidowX](https://www.trossenrobotics.com/widowx-250) developed by Trossen Robotics
+ """
+
+ config_class = WidowXConfig
+ name = "widowx"
+
+ def __init__(self, config: WidowXConfig):
+ raise NotImplementedError
+ super().__init__(config)
+ self.config = config
+ self.bus = DynamixelMotorsBus(
+ port=self.config.port,
+ motors={
+ "waist": Motor(1, "xm430-w350", MotorNormMode.RANGE_M100_100),
+ "shoulder": Motor(2, "xm430-w350", MotorNormMode.RANGE_M100_100),
+ "shoulder_shadow": Motor(3, "xm430-w350", MotorNormMode.RANGE_M100_100),
+ "elbow": Motor(4, "xm430-w350", MotorNormMode.RANGE_M100_100),
+ "elbow_shadow": Motor(5, "xm430-w350", MotorNormMode.RANGE_M100_100),
+ "forearm_roll": Motor(6, "xm430-w350", MotorNormMode.RANGE_M100_100),
+ "wrist_angle": Motor(7, "xm430-w350", MotorNormMode.RANGE_M100_100),
+ "wrist_rotate": Motor(8, "xl430-w250", MotorNormMode.RANGE_M100_100),
+ "gripper": Motor(9, "xc430-w150", MotorNormMode.RANGE_0_100),
+ },
+ )
+
+ @property
+ def action_features(self) -> dict[str, type]:
+ return {f"{motor}.pos": float for motor in self.bus.motors}
+
+ @property
+ def feedback_features(self) -> dict[str, type]:
+ return {}
+
+ @property
+ def is_connected(self) -> bool:
+ return self.bus.is_connected
+
+ def connect(self, calibrate: bool = True):
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} already connected")
+
+ self.bus.connect()
+ if not self.is_calibrated and calibrate:
+ self.calibrate()
+
+ self.configure()
+ logger.info(f"{self} connected.")
+
+ @property
+ def is_calibrated(self) -> bool:
+ return self.bus.is_calibrated
+
+ def calibrate(self) -> None:
+ raise NotImplementedError # TODO(aliberts): adapt code below (copied from koch)
+ logger.info(f"\nRunning calibration of {self}")
+ self.bus.disable_torque()
+ for motor in self.bus.motors:
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+ self.bus.write("Drive_Mode", "elbow_flex", DriveMode.INVERTED.value)
+ drive_modes = {motor: 1 if motor == "elbow_flex" else 0 for motor in self.bus.motors}
+
+ input("Move robot to the middle of its range of motion and press ENTER....")
+ homing_offsets = self.bus.set_half_turn_homings()
+
+ full_turn_motors = ["shoulder_pan", "wrist_roll"]
+ unknown_range_motors = [motor for motor in self.bus.motors if motor not in full_turn_motors]
+ print(
+ f"Move all joints except {full_turn_motors} sequentially through their "
+ "entire ranges of motion.\nRecording positions. Press ENTER to stop..."
+ )
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+ for motor in full_turn_motors:
+ range_mins[motor] = 0
+ range_maxes[motor] = 4095
+
+ self.calibration = {}
+ for motor, m in self.bus.motors.items():
+ self.calibration[motor] = MotorCalibration(
+ id=m.id,
+ drive_mode=drive_modes[motor],
+ homing_offset=homing_offsets[motor],
+ range_min=range_mins[motor],
+ range_max=range_maxes[motor],
+ )
+
+ self.bus.write_calibration(self.calibration)
+ self._save_calibration()
+ logger.info(f"Calibration saved to {self.calibration_fpath}")
+
+ def configure(self) -> None:
+ self.bus.disable_torque()
+ self.bus.configure_motors()
+
+ # Set secondary/shadow ID for shoulder and elbow. These joints have two motors.
+ # As a result, if only one of them is required to move to a certain position,
+ # the other will follow. This is to avoid breaking the motors.
+ self.bus.write("Secondary_ID", "shoulder_shadow", 2)
+ self.bus.write("Secondary_ID", "elbow_shadow", 4)
+
+ def get_action(self) -> dict[str, float]:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ start = time.perf_counter()
+ action = self.bus.sync_read("Present_Position")
+ action = {f"{motor}.pos": val for motor, val in action.items()}
+ dt_ms = (time.perf_counter() - start) * 1e3
+ logger.debug(f"{self} read action: {dt_ms:.1f}ms")
+ return action
+
+ def send_feedback(self, feedback: dict[str, float]) -> None:
+ raise NotImplementedError
+
+ def disconnect(self) -> None:
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ self.bus.disconnect()
+ logger.info(f"{self} disconnected.")
diff --git a/src/lerobot/templates/lerobot_modelcard_template.md b/src/lerobot/templates/lerobot_modelcard_template.md
new file mode 100644
index 0000000000..7b7aaa84af
--- /dev/null
+++ b/src/lerobot/templates/lerobot_modelcard_template.md
@@ -0,0 +1,75 @@
+---
+# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1
+# Doc / guide: https://huggingface.co/docs/hub/model-cards
+# prettier-ignore
+{{card_data}}
+---
+
+# Model Card for {{ model_name | default("Model ID", true) }}
+
+
+
+{% if model_name == "smolvla" %}
+[SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware.
+{% elif model_name == "act" %}
+[Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates.
+{% elif model_name == "tdmpc" %}
+[TD-MPC](https://huggingface.co/papers/2203.04955) combines model-free and model-based approaches to improve sample efficiency and performance in continuous control tasks by using a learned latent dynamics model and terminal value function.
+{% elif model_name == "diffusion" %}
+[Diffusion Policy](https://huggingface.co/papers/2303.04137) treats visuomotor control as a generative diffusion process, producing smooth, multi-step action trajectories that excel at contact-rich manipulation.
+{% elif model_name == "vqbet" %}
+[VQ-BET](https://huggingface.co/papers/2403.03181) combines vector-quantised action tokens with Behaviour Transformers to discretise control and achieve data-efficient imitation across diverse skills.
+{% elif model_name == "pi0" %}
+[Pi0](https://huggingface.co/papers/2410.24164) is a generalist vision-language-action transformer that converts multimodal observations and text instructions into robot actions for zero-shot task transfer.
+{% elif model_name == "pi0fast" %}
+[Pi0-Fast](https://huggingface.co/papers/2501.09747) is a variant of Pi0 that uses a new tokenization method called FAST, which enables training of an autoregressive vision-language-action policy for high-frequency robotic tasks with improved performance and reduced training time.
+{% elif model_name == "sac" %}
+[Soft Actor-Critic (SAC)](https://huggingface.co/papers/1801.01290) is an entropy-regularised actor-critic algorithm offering stable, sample-efficient learning in continuous-control environments.
+{% elif model_name == "reward_classifier" %}
+A reward classifier is a lightweight neural network that scores observations or trajectories for task success, providing a learned reward signal or offline evaluation when explicit rewards are unavailable.
+{% else %}
+_Model type not recognized — please update this template._
+{% endif %}
+
+This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
+See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
+
+---
+
+## How to Get Started with the Model
+
+For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
+Below is the short version of how to train and run inference/eval:
+
+### Train from scratch
+
+```bash
+python -m lerobot.scripts.train \
+ --dataset.repo_id=${HF_USER}/ \
+ --policy.type=act \
+ --output_dir=outputs/train/ \
+ --job_name=lerobot_training \
+ --policy.device=cuda \
+ --policy.repo_id=${HF_USER}/ \
+ --wandb.enable=true
+```
+
+_Writes checkpoints to `outputs/train//checkpoints/`._
+
+### Evaluate the policy/run inference
+
+```bash
+python -m lerobot.record \
+ --robot.type=so100_follower \
+ --dataset.repo_id=/eval_ \
+ --policy.path=/ \
+ --episodes=10
+```
+
+Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or hub checkpoint.
+
+---
+
+## Model Details
+
+- **License:** {{ license | default("\[More Information Needed]", true) }}
diff --git a/lerobot/templates/visualize_dataset_homepage.html b/src/lerobot/templates/visualize_dataset_homepage.html
similarity index 97%
rename from lerobot/templates/visualize_dataset_homepage.html
rename to src/lerobot/templates/visualize_dataset_homepage.html
index adff07be72..19613afb5d 100644
--- a/lerobot/templates/visualize_dataset_homepage.html
+++ b/src/lerobot/templates/visualize_dataset_homepage.html
@@ -7,7 +7,7 @@
-Example Datasets: